hip_filename (string, 5-84 chars) | hip_content (string, 79-9.69M chars) | cuda_filename (string, 4-83 chars) | cuda_content (string, 19-9.69M chars) |
---|---|---|---|
af0dcad2d3cf90f9ecb7e16d4056cd63002a547b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.3.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
November 2012
@precisions normal d
*/
/*
blk_M=64 blk_N=64 blk_K=16 nthd_x=64 nthd_y=4
*/
#include "common_magma.h"
#include "commonblas_d.h"
#define magmablas_dgemm_fermi magmablas_dgemm
texture<int2,1> tex_x_double_A;
texture<int2,1> tex_x_double_B;
static __inline__ __device__ double fetch_x_A(const int& i)
{
register int2 v = tex1Dfetch(tex_x_double_A, i);
return __hiloint2double(v.y, v.x);
}
static __inline__ __device__ double fetch_x_B(const int& i)
{
register int2 v = tex1Dfetch(tex_x_double_B, i);
return __hiloint2double(v.y, v.x);
}
extern "C" __global__ void
fermiDgemm_v2_kernel_NN(double *C, const double *A, const double *B,
int m, int n, int k, int lda, int ldb,
int ldc, double alpha, double beta,
int offsetA, int offsetB)
{
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int iby = blockIdx.y * 64;
const int ibx = blockIdx.x * 64;
const int idt = ty * 64 + tx;
const int tx2 = idt%16;
const int ty2 = idt/16;
__shared__ double Abs[64][17];
__shared__ double Bb[16][65];
int tll = ty2;
double xxA[4];
double xxB[4];
//int trackA = offsetA + ibx +__mul24( ty2, lda) + tx2 ;
//A += trackA;
A += (offsetA + ibx +__mul24( ty2, lda) + tx2);
//int trackB = offsetB + tx2+ __mul24(iby + ty2 * 4, ldb );
//B += trackB;
B += (offsetB + tx2+ __mul24(iby + ty2 * 4, ldb ));
#pragma unroll
for(int y=0; y<4; y++)
Abs[tx2+ y*16][ty2] = /* (tll<k)* */ A[y*16] ;
//Abs[tx2+ y*16][ty2] = /* (tll<k)* */ fetch_x_A(trackA + y*16) ;
#pragma unroll
for(int y=0; y<4; y++)
Bb[tx2][ty2*4+y] = B[y * ldb] ;
// Bb[tx2][ty2*4+y] = fetch_x_B( trackB + y * ldb) ;
__syncthreads();
double Axs[4];
double Bxp[4];
double Cb[16] = {0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0};
int k1;
for(k1=0; k1<(k-16); k1+=16)
{
tll+=16;
A += lda *16 ;
B += 16;
//trackA += 16*lda ;
//trackB += 16;
#pragma unroll
for( int y=0; y<4; y++)
xxA[y] = /* (tll<k)* */ A[y*16];
// xxA[y] = /* (tll<k)* */ fetch_x_A(trackA + y*16);
#pragma unroll
for( int y=0; y<4; y++)
xxB[y] = B[y*ldb];
// xxB[y] = fetch_x_B( trackB + y*ldb);
#pragma unroll
for( int j1=0;j1<16;j1++)
{
#pragma unroll
for( int y=0; y<4; y++)
Axs[y] = Abs[tx2+y*16][j1] ;
#pragma unroll
for( int y=0; y<4; y++)
Bxp[y]= Bb[j1][ty2+y*16];
#pragma unroll
for( int x=0; x<4; x++)
{
#pragma unroll
for( int y=0; y<4; y++)
{
Cb[x*4+y] += Axs[x]*Bxp[y];
}
}
}
__syncthreads();
#pragma unroll
for(int y=0; y<4; y++)
Abs[tx2+y*16][ty2] = xxA[y];
#pragma unroll
for(int y=0; y<4; y++)
Bb[tx2][ty2*4 + y] = xxB[y];
__syncthreads();
}
C += tx2 + ibx + __mul24 (ty2 + iby ,ldc);
#pragma unroll
for(int j1=0;j1<16;j1++)
{
#pragma unroll
for( int y=0; y<4; y++)
Axs[y] = Abs[tx2 + y*16][j1] ;
#pragma unroll
for( int y=0; y<4; y++)
Bxp[y]= Bb[j1][ty2 + y*16];
#pragma unroll
for( int x=0; x<4; x++)
{
#pragma unroll
for( int y=0;y<4; y++)
{
Cb[x*4 + y] += Axs[x]*Bxp[y];
}
}
}
int gy = iby + ty2;
#pragma unroll
for( int y=0;y<4;y++, gy+=16)
{
int gx = ibx + tx2;
#pragma unroll
for(int x=0;x<4;x++, gx+=16)
{
if (gx < m && gy < n)
C[x*16] = alpha*Cb[y+x*4] + beta * C[x*16];
}
C += ldc*16;
}
}
extern "C" __global__ void
fermiDgemm_v2_kernel_TN(double *C, const double *A, const double *B,
int m, int n, int k, int lda, int ldb,
int ldc, double alpha, double beta,
int offsetA, int offsetB)
{
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int iby = blockIdx.y * 64;
const int ibx = blockIdx.x * 64;
const int idt = ty * 64 + tx;
const int tx2 = idt%16;
const int ty2 = idt/16;
__shared__ double Bb[16][65];
__shared__ double Abs[64][17];
double xxA[4];
double xxB[4];
int trackA = offsetA + tx2 + __mul24( ibx + ty2*4, lda );
int trackB = offsetB + tx2 + __mul24( iby + ty2*4, ldb );
A+= trackA;
B+= trackB;
int tll = tx2;
#pragma unroll
for(int y=0; y<4; y++)
Abs[ty2*4+y][tx2] = (tll<k)* fetch_x_A(trackA + y*lda);
#pragma unroll
for(int y=0; y<4; y++)
Bb[tx2][ty2*4+y] = /* (tll<k)* */ fetch_x_B( trackB + y*ldb );
__syncthreads();
double Axs[4];
double Bxp[4];
double Cb[16] = {0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0};
int k1;
for(k1=0; k1<(k-16); k1+=16)
{
tll +=16;
B += 16;
A += 16 ;
trackA+=16 ;
trackB+=16;
#pragma unroll
for(int y=0; y<4; y++)
xxA[y] = (tll<k)* fetch_x_A(trackA + y*lda);
#pragma unroll
for(int y=0; y<4; y++)
xxB[y] = /* (tll<k)* */ fetch_x_B(trackB + y*ldb);
#pragma unroll
for(int j1=0;j1<16;j1++)
{
#pragma unroll
for(int y=0; y<4; y++)
Axs[y] = Abs[tx2+y*16][j1];
#pragma unroll
for(int y=0; y<4; y++)
Bxp[y]= Bb[j1][ty2+y*16];
#pragma unroll
for(int x=0; x<4; x++)
{
#pragma unroll
for(int y=0; y<4; y++)
{
Cb[x*4+y] += Axs[x]*Bxp[y];
}
}
}
__syncthreads();
#pragma unroll
for(int y=0; y<4; y++)
Abs[ty2*4+y][tx2] = xxA[y];
#pragma unroll
for(int y=0; y<4; y++)
Bb[tx2][ty2*4+y] =xxB[y];
__syncthreads();
}
C += tx2 + ibx + __mul24 (ty2 + iby ,ldc);
#pragma unroll
for(int j1=0; j1<16; j1++)
{
#pragma unroll
for(int y=0; y<4; y++)
Axs[y] = Abs[tx2+y*16][j1];
#pragma unroll
for(int y=0; y<4; y++)
Bxp[y]= Bb[j1][ty2+y*16];
#pragma unroll
for(int x=0; x<4; x++)
{
#pragma unroll
for(int y=0; y<4; y++)
{
Cb[x*4+y] += Axs[x]*Bxp[y];
}
}
}
int gy = iby+ty2;
#pragma unroll
for(int y=0;y<4;y++, gy+=16)
{
int gx = ibx+tx2;
#pragma unroll
for(int x=0;x<4;x++, gx+=16)
{
if (gx < m && gy < n)
C[x*16] =alpha*Cb[y+x*4] + beta * C[x*16];
}
C+=ldc*16;
}
}
extern "C" __global__ void
fermiDgemm_v2_kernel_TT(double *C, const double *A, const double *B,
int m, int n, int k, int lda, int ldb,
int ldc, double alpha, double beta,
int offsetA, int offsetB)
{
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int iby = blockIdx.y * 64;
const int ibx = blockIdx.x * 64;
const int idt = ty * 64 + tx;
const int tx2 = idt%16;
const int ty2 = idt/16;
__shared__ double Bb[16][65];
__shared__ double Abs[64][17];
double xxA[4];
double xxB[4];
int trackA = offsetA + __mul24( ibx + ty2, lda) + tx2;
int trackB = offsetB + iby+ tx2 + __mul24(ty2, ldb);
A += trackA;
B += trackB;
int tll = tx2;
#pragma unroll
for(int y=0; y<4; y++)
Abs[ty2+16*y][tx2] = /* (tll<k)* */ fetch_x_A(trackA + lda*16*y);
#pragma unroll
for(int y=0; y<4; y++)
Bb[ty2][tx2+16*y] = fetch_x_B(trackB+16*y);
__syncthreads();
double Axs[4];
double Bxp[4];
double Cb[16] = {0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0};
int k1;
for(k1=0; k1<(k-16); k1+=16)
{
tll+=16;
A += 16;
B += 16*ldb;
trackA+=16;
trackB+=16*ldb;
#pragma unroll
for( int y=0; y<4; y++)
xxA[y] = /* (tll<k)* */ fetch_x_A(trackA + lda*y*16);
#pragma unroll
for( int y=0; y<4; y++)
xxB[y] = fetch_x_B(trackB + 16*y);
#pragma unroll
for( int j1=0;j1<16;j1++)
{
#pragma unroll
for( int y=0; y<4; y++)
Axs[y] = Abs[tx2 + y*16][j1];
#pragma unroll
for( int y=0; y<4; y++)
Bxp[y]= Bb[j1][ty2 + y*16];
#pragma unroll
for( int x=0; x<4; x++)
#pragma unroll
for( int y=0;y<4;y++)
Cb[x*4+y] += Axs[x]*Bxp[y];
}
__syncthreads();
#pragma unroll
for( int y=0; y<4; y++)
Abs[ty2 + 16*y][tx2] = xxA[y];
#pragma unroll
for( int y=0; y<4; y++)
Bb[ty2][tx2+y*16] = xxB[y];
__syncthreads();
}
C += tx2 + ibx + __mul24 (ty2 + iby ,ldc);
#pragma unroll
for( int j1=0; j1<16; j1++)
{
#pragma unroll
for( int y=0; y<4; y++)
Axs[y] = Abs[tx2 + y*16][j1];
#pragma unroll
for( int y=0; y<4; y++)
Bxp[y]= Bb[j1][ty2 + y*16];
#pragma unroll
for( int x=0; x<4; x++)
#pragma unroll
for( int y=0; y<4; y++)
Cb[x*4+y] += Axs[x]*Bxp[y];
}
int gy = iby + ty2;
#pragma unroll
for( int y=0; y<4; y++, gy+=16)
{
int gx = ibx + tx2;
#pragma unroll
for(int x=0; x<4; x++, gx+=16)
{
if (gx < m && gy < n)
C[x*16] = alpha*Cb[y+x*4] + beta * C[x*16];
}
C+=ldc*16;
}
}
extern "C" __global__ void
fermiDgemm_v2_kernel_NT(double *C, const double *A, const double *B,
int m, int n, int k, int lda, int ldb,
int ldc, double alpha, double beta,
int offsetA, int offsetB)
{
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int iby = blockIdx.y * 64;
const int ibx = blockIdx.x * 64;
const int idt = ty * 64 + tx;
const int tx2= idt%16;
const int ty2= idt/16;
__shared__ double Bb[16][65];
__shared__ double Abs[64][17];
double xxA[4];
double xxB[4];
int trackA = offsetA + ibx +__mul24(ty2, lda) + tx2 ;
int trackB = offsetB + iby + tx2 + __mul24(ty2, ldb);
A+= trackA;
B += trackB;
int tll = ty2;
#pragma unroll
for(int y=0; y<4; y++)
Abs[tx2+ y*16][ty2] = /* (tll<k)* */ fetch_x_A(trackA + y*16);
#pragma unroll
for(int y=0; y<4; y++)
Bb[ty2][tx2+16*y] = /* (tll<k)* */ fetch_x_B(trackB+16*y);
__syncthreads();
double Axs[4];
double Bxp[4];
double Cb[16] = {0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0};
int k1;
for(k1=0; k1<(k-16); k1+=16)
{
tll += 16;
A += lda *16 ;
B += 16*ldb;
trackA+=16*lda ;
trackB+=16*ldb;
#pragma unroll
for( int y=0; y<4; y++)
xxA[y] = /* (tll<k)* */ fetch_x_A(trackA + y*16);
#pragma unroll
for( int y=0; y<4; y++)
xxB[y] = /* (tll<k)* */ fetch_x_B( trackB + 16*y);
#pragma unroll
for( int j1=0;j1<16;j1++)
{
#pragma unroll
for( int y=0; y<4; y++)
Bxp[y]= Bb[j1][ty2 + y*16];
#pragma unroll
for( int y=0; y<4; y++)
Axs[y] = Abs[tx2 + y*16][j1] ;
#pragma unroll
for( int x=0; x<4; x++)
#pragma unroll
for( int y=0; y<4; y++)
Cb[x*4+y] += Axs[x]*Bxp[y];
}
__syncthreads();
#pragma unroll
for( int y=0; y<4; y++)
Abs[tx2 + y*16][ty2] = xxA[y];
#pragma unroll
for( int y=0; y<4; y++)
Bb[ty2][tx2+y*16] = xxB[y];
__syncthreads();
}
C += tx2 + ibx + __mul24(ty2 + iby ,ldc);
#pragma unroll
for(int j1=0; j1<16; j1++)
{
#pragma unroll
for( int y=0; y<4; y++)
Bxp[y] = Bb[j1][ty2 + y*16];
#pragma unroll
for( int y=0; y<4; y++)
Axs[y] = Abs[tx2 + y*16][j1] ;
#pragma unroll
for( int x=0; x<4; x++)
#pragma unroll
for( int y=0;y<4;y++)
Cb[x*4+y] += Axs[x]*Bxp[y];
}
int gy = iby + ty2;
#pragma unroll
for( int y=0; y<4; y++, gy+=16)
{
int gx = ibx + tx2;
#pragma unroll
for(int x=0; x<4; x++, gx+=16)
{
if (gx < m && gy < n)
C[x*16] = alpha*Cb[y + x*4] + beta * C[x*16];
}
C+=ldc*16;
}
}
extern "C" void
magmablas_dgemm_fermi( char TRANSA, char TRANSB, int m , int n , int k ,
double alpha, const double *A, int lda,
const double *B, int ldb,
double beta, double *C, int ldc )
{
/* -- MAGMA (version 1.3.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
November 2012
Purpose
=======
DGEMM performs one of the matrix-matrix operations
C := alpha*op( A )*op( B ) + beta*C,
where op( X ) is one of
op( X ) = X or op( X ) = X',
alpha and beta are scalars, and A, B and C are matrices, with op( A )
an m by k matrix, op( B ) a k by n matrix and C an m by n matrix.
Parameters
==========
TRANSA - CHARACTER*1.
On entry, TRANSA specifies the form of op( A ) to be used in
the matrix multiplication as follows:
TRANSA = 'N' or 'n', op( A ) = A.
TRANSA = 'T' or 't', op( A ) = A'.
TRANSA = 'C' or 'c', op( A ) = A'.
Unchanged on exit.
TRANSB - CHARACTER*1.
On entry, TRANSB specifies the form of op( B ) to be used in
the matrix multiplication as follows:
TRANSB = 'N' or 'n', op( B ) = B.
TRANSB = 'T' or 't', op( B ) = B'.
TRANSB = 'C' or 'c', op( B ) = B'.
Unchanged on exit.
M - INTEGER.
On entry, M specifies the number of rows of the matrix
op( A ) and of the matrix C. M must be at least zero.
Unchanged on exit.
N - INTEGER.
On entry, N specifies the number of columns of the matrix
op( B ) and the number of columns of the matrix C. N must be
at least zero.
Unchanged on exit.
K - INTEGER.
On entry, K specifies the number of columns of the matrix
op( A ) and the number of rows of the matrix op( B ). K must
be at least zero.
Unchanged on exit.
ALPHA - DOUBLE PRECISION.
On entry, ALPHA specifies the scalar alpha.
Unchanged on exit.
A - DOUBLE PRECISION array of DIMENSION ( LDA, ka ), where ka is
k when TRANSA = 'N' or 'n', and is m otherwise.
Before entry with TRANSA = 'N' or 'n', the leading m by k
part of the array A must contain the matrix A, otherwise
the leading k by m part of the array A must contain the
matrix A.
Unchanged on exit.
LDA - INTEGER.
On entry, LDA specifies the first dimension of A as declared
in the calling (sub) program. When TRANSA = 'N' or 'n' then
LDA must be at least max( 1, m ), otherwise LDA must be at
least max( 1, k ).
Unchanged on exit.
B - DOUBLE PRECISION array of DIMENSION ( LDB, kb ), where kb is
n when TRANSB = 'N' or 'n', and is k otherwise.
Before entry with TRANSB = 'N' or 'n', the leading k by n
part of the array B must contain the matrix B, otherwise
the leading n by k part of the array B must contain the
matrix B.
Unchanged on exit.
LDB - INTEGER.
On entry, LDB specifies the first dimension of B as declared
in the calling (sub) program. When TRANSB = 'N' or 'n' then
LDB must be at least max( 1, k ), otherwise LDB must be at
least max( 1, n ).
Unchanged on exit.
BETA - DOUBLE PRECISION.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then C need not be set on input.
Unchanged on exit.
C - DOUBLE PRECISION array of DIMENSION ( LDC, n ).
Before entry, the leading m by n part of the array C must
contain the matrix C, except when beta is zero, in which
case C need not be set on entry.
On exit, the array C is overwritten by the m by n matrix
( alpha*op( A )*op( B ) + beta*C ).
LDC - INTEGER.
On entry, LDC specifies the first dimension of C as declared
in the calling (sub) program. LDC must be at least
max( 1, m ).
Unchanged on exit.
===================================================================== */
if (m<=0 || n<=0 || k<=0)
return;
size_t offsetA = 0;
size_t offsetB = 0;
int TransA = 1, TransB = 1;
if (TRANSA == 'N' || TRANSA == 'n')
TransA = 0;
if (TRANSB == 'N' || TRANSB == 'n')
TransB = 0;
size_t sizeA = (size_t) lda * (size_t) (!TransA ? k : m);
size_t sizeB = (size_t) ldb * (size_t) (!TransB ? n : k);
// size_t CUBLAS_MAX_1DBUF_SIZE = ((1 << 27) - 512) / 2;
size_t CUBLAS_MAX_1DBUF_SIZE = ((1 << 27) - 512);
if (sizeA>=CUBLAS_MAX_1DBUF_SIZE ||
sizeB>=CUBLAS_MAX_1DBUF_SIZE )
{
// printf("Exceeding texuture limit (CUBLAS_MAX_1DBUF_SIZE=%ld), using hipblasSgemm\n", CUBLAS_MAX_1DBUF_SIZE);
hipblasDgemm(TRANSA, TRANSB, m, n, k, alpha,
A, lda, B, ldb,
beta, C, ldc);
return;
}
hipError_t errt;
errt = hipBindTexture(&offsetA, tex_x_double_A, (int2 *)A,
sizeA * sizeof(A[0]));
if( errt != hipSuccess)
{
printf("can not bind to texture \n");
return;
}
errt = hipBindTexture(&offsetB, tex_x_double_B, (int2 *)B,
sizeB * sizeof(B[0]));
if( errt != hipSuccess)
{
printf("can not bind to texture \n");
return;
}
dim3 threads( 64, 4 );
dim3 grid(m/(64)+(m%(64)!=0),n/(64)+(n%(64)!=0));
offsetA = offsetA/sizeof(A[0]);
offsetB = offsetB/sizeof(B[0]);
if ( TransB )
if ( !TransA )
hipLaunchKernelGGL(( fermiDgemm_v2_kernel_NT), dim3(grid), dim3(threads), 0, magma_stream , C, A, B, m, n, k, lda, ldb,
ldc, alpha, beta,
(int)offsetA, (int)offsetB);
else
hipLaunchKernelGGL(( fermiDgemm_v2_kernel_TT), dim3(grid), dim3(threads), 0, magma_stream , C, A, B, m, n, k, lda, ldb,
ldc, alpha, beta,
(int)offsetA, (int)offsetB);
else
if ( !TransA )
hipLaunchKernelGGL(( fermiDgemm_v2_kernel_NN), dim3(grid), dim3(threads), 0, magma_stream , C, A, B, m, n, k, lda, ldb,
ldc, alpha, beta,
(int)offsetA, (int)offsetB);
else
hipLaunchKernelGGL(( fermiDgemm_v2_kernel_TN), dim3(grid), dim3(threads), 0, magma_stream , C, A, B, m, n, k, lda, ldb,
ldc, alpha, beta,
(int)offsetA, (int)offsetB);
hipUnbindTexture ( tex_x_double_A ) ;
hipUnbindTexture ( tex_x_double_B ) ;
}
| af0dcad2d3cf90f9ecb7e16d4056cd63002a547b.cu | /*
-- MAGMA (version 1.3.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
November 2012
@precisions normal d
*/
/*
blk_M=64 blk_N=64 blk_K=16 nthd_x=64 nthd_y=4
*/
#include "common_magma.h"
#include "commonblas_d.h"
#define magmablas_dgemm_fermi magmablas_dgemm
texture<int2,1> tex_x_double_A;
texture<int2,1> tex_x_double_B;
static __inline__ __device__ double fetch_x_A(const int& i)
{
register int2 v = tex1Dfetch(tex_x_double_A, i);
return __hiloint2double(v.y, v.x);
}
static __inline__ __device__ double fetch_x_B(const int& i)
{
register int2 v = tex1Dfetch(tex_x_double_B, i);
return __hiloint2double(v.y, v.x);
}
extern "C" __global__ void
fermiDgemm_v2_kernel_NN(double *C, const double *A, const double *B,
int m, int n, int k, int lda, int ldb,
int ldc, double alpha, double beta,
int offsetA, int offsetB)
{
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int iby = blockIdx.y * 64;
const int ibx = blockIdx.x * 64;
const int idt = ty * 64 + tx;
const int tx2 = idt%16;
const int ty2 = idt/16;
__shared__ double Abs[64][17];
__shared__ double Bb[16][65];
int tll = ty2;
double xxA[4];
double xxB[4];
//int trackA = offsetA + ibx +__mul24( ty2, lda) + tx2 ;
//A += trackA;
A += (offsetA + ibx +__mul24( ty2, lda) + tx2);
//int trackB = offsetB + tx2+ __mul24(iby + ty2 * 4, ldb );
//B += trackB;
B += (offsetB + tx2+ __mul24(iby + ty2 * 4, ldb ));
#pragma unroll
for(int y=0; y<4; y++)
Abs[tx2+ y*16][ty2] = /* (tll<k)* */ A[y*16] ;
//Abs[tx2+ y*16][ty2] = /* (tll<k)* */ fetch_x_A(trackA + y*16) ;
#pragma unroll
for(int y=0; y<4; y++)
Bb[tx2][ty2*4+y] = B[y * ldb] ;
// Bb[tx2][ty2*4+y] = fetch_x_B( trackB + y * ldb) ;
__syncthreads();
double Axs[4];
double Bxp[4];
double Cb[16] = {0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0};
int k1;
for(k1=0; k1<(k-16); k1+=16)
{
tll+=16;
A += lda *16 ;
B += 16;
//trackA += 16*lda ;
//trackB += 16;
#pragma unroll
for( int y=0; y<4; y++)
xxA[y] = /* (tll<k)* */ A[y*16];
// xxA[y] = /* (tll<k)* */ fetch_x_A(trackA + y*16);
#pragma unroll
for( int y=0; y<4; y++)
xxB[y] = B[y*ldb];
// xxB[y] = fetch_x_B( trackB + y*ldb);
#pragma unroll
for( int j1=0;j1<16;j1++)
{
#pragma unroll
for( int y=0; y<4; y++)
Axs[y] = Abs[tx2+y*16][j1] ;
#pragma unroll
for( int y=0; y<4; y++)
Bxp[y]= Bb[j1][ty2+y*16];
#pragma unroll
for( int x=0; x<4; x++)
{
#pragma unroll
for( int y=0; y<4; y++)
{
Cb[x*4+y] += Axs[x]*Bxp[y];
}
}
}
__syncthreads();
#pragma unroll
for(int y=0; y<4; y++)
Abs[tx2+y*16][ty2] = xxA[y];
#pragma unroll
for(int y=0; y<4; y++)
Bb[tx2][ty2*4 + y] = xxB[y];
__syncthreads();
}
C += tx2 + ibx + __mul24 (ty2 + iby ,ldc);
#pragma unroll
for(int j1=0;j1<16;j1++)
{
#pragma unroll
for( int y=0; y<4; y++)
Axs[y] = Abs[tx2 + y*16][j1] ;
#pragma unroll
for( int y=0; y<4; y++)
Bxp[y]= Bb[j1][ty2 + y*16];
#pragma unroll
for( int x=0; x<4; x++)
{
#pragma unroll
for( int y=0;y<4; y++)
{
Cb[x*4 + y] += Axs[x]*Bxp[y];
}
}
}
int gy = iby + ty2;
#pragma unroll
for( int y=0;y<4;y++, gy+=16)
{
int gx = ibx + tx2;
#pragma unroll
for(int x=0;x<4;x++, gx+=16)
{
if (gx < m && gy < n)
C[x*16] = alpha*Cb[y+x*4] + beta * C[x*16];
}
C += ldc*16;
}
}
extern "C" __global__ void
fermiDgemm_v2_kernel_TN(double *C, const double *A, const double *B,
int m, int n, int k, int lda, int ldb,
int ldc, double alpha, double beta,
int offsetA, int offsetB)
{
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int iby = blockIdx.y * 64;
const int ibx = blockIdx.x * 64;
const int idt = ty * 64 + tx;
const int tx2 = idt%16;
const int ty2 = idt/16;
__shared__ double Bb[16][65];
__shared__ double Abs[64][17];
double xxA[4];
double xxB[4];
int trackA = offsetA + tx2 + __mul24( ibx + ty2*4, lda );
int trackB = offsetB + tx2 + __mul24( iby + ty2*4, ldb );
A+= trackA;
B+= trackB;
int tll = tx2;
#pragma unroll
for(int y=0; y<4; y++)
Abs[ty2*4+y][tx2] = (tll<k)* fetch_x_A(trackA + y*lda);
#pragma unroll
for(int y=0; y<4; y++)
Bb[tx2][ty2*4+y] = /* (tll<k)* */ fetch_x_B( trackB + y*ldb );
__syncthreads();
double Axs[4];
double Bxp[4];
double Cb[16] = {0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0};
int k1;
for(k1=0; k1<(k-16); k1+=16)
{
tll +=16;
B += 16;
A += 16 ;
trackA+=16 ;
trackB+=16;
#pragma unroll
for(int y=0; y<4; y++)
xxA[y] = (tll<k)* fetch_x_A(trackA + y*lda);
#pragma unroll
for(int y=0; y<4; y++)
xxB[y] = /* (tll<k)* */ fetch_x_B(trackB + y*ldb);
#pragma unroll
for(int j1=0;j1<16;j1++)
{
#pragma unroll
for(int y=0; y<4; y++)
Axs[y] = Abs[tx2+y*16][j1];
#pragma unroll
for(int y=0; y<4; y++)
Bxp[y]= Bb[j1][ty2+y*16];
#pragma unroll
for(int x=0; x<4; x++)
{
#pragma unroll
for(int y=0; y<4; y++)
{
Cb[x*4+y] += Axs[x]*Bxp[y];
}
}
}
__syncthreads();
#pragma unroll
for(int y=0; y<4; y++)
Abs[ty2*4+y][tx2] = xxA[y];
#pragma unroll
for(int y=0; y<4; y++)
Bb[tx2][ty2*4+y] =xxB[y];
__syncthreads();
}
C += tx2 + ibx + __mul24 (ty2 + iby ,ldc);
#pragma unroll
for(int j1=0; j1<16; j1++)
{
#pragma unroll
for(int y=0; y<4; y++)
Axs[y] = Abs[tx2+y*16][j1];
#pragma unroll
for(int y=0; y<4; y++)
Bxp[y]= Bb[j1][ty2+y*16];
#pragma unroll
for(int x=0; x<4; x++)
{
#pragma unroll
for(int y=0; y<4; y++)
{
Cb[x*4+y] += Axs[x]*Bxp[y];
}
}
}
int gy = iby+ty2;
#pragma unroll
for(int y=0;y<4;y++, gy+=16)
{
int gx = ibx+tx2;
#pragma unroll
for(int x=0;x<4;x++, gx+=16)
{
if (gx < m && gy < n)
C[x*16] =alpha*Cb[y+x*4] + beta * C[x*16];
}
C+=ldc*16;
}
}
extern "C" __global__ void
fermiDgemm_v2_kernel_TT(double *C, const double *A, const double *B,
int m, int n, int k, int lda, int ldb,
int ldc, double alpha, double beta,
int offsetA, int offsetB)
{
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int iby = blockIdx.y * 64;
const int ibx = blockIdx.x * 64;
const int idt = ty * 64 + tx;
const int tx2 = idt%16;
const int ty2 = idt/16;
__shared__ double Bb[16][65];
__shared__ double Abs[64][17];
double xxA[4];
double xxB[4];
int trackA = offsetA + __mul24( ibx + ty2, lda) + tx2;
int trackB = offsetB + iby+ tx2 + __mul24(ty2, ldb);
A += trackA;
B += trackB;
int tll = tx2;
#pragma unroll
for(int y=0; y<4; y++)
Abs[ty2+16*y][tx2] = /* (tll<k)* */ fetch_x_A(trackA + lda*16*y);
#pragma unroll
for(int y=0; y<4; y++)
Bb[ty2][tx2+16*y] = fetch_x_B(trackB+16*y);
__syncthreads();
double Axs[4];
double Bxp[4];
double Cb[16] = {0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0};
int k1;
for(k1=0; k1<(k-16); k1+=16)
{
tll+=16;
A += 16;
B += 16*ldb;
trackA+=16;
trackB+=16*ldb;
#pragma unroll
for( int y=0; y<4; y++)
xxA[y] = /* (tll<k)* */ fetch_x_A(trackA + lda*y*16);
#pragma unroll
for( int y=0; y<4; y++)
xxB[y] = fetch_x_B(trackB + 16*y);
#pragma unroll
for( int j1=0;j1<16;j1++)
{
#pragma unroll
for( int y=0; y<4; y++)
Axs[y] = Abs[tx2 + y*16][j1];
#pragma unroll
for( int y=0; y<4; y++)
Bxp[y]= Bb[j1][ty2 + y*16];
#pragma unroll
for( int x=0; x<4; x++)
#pragma unroll
for( int y=0;y<4;y++)
Cb[x*4+y] += Axs[x]*Bxp[y];
}
__syncthreads();
#pragma unroll
for( int y=0; y<4; y++)
Abs[ty2 + 16*y][tx2] = xxA[y];
#pragma unroll
for( int y=0; y<4; y++)
Bb[ty2][tx2+y*16] = xxB[y];
__syncthreads();
}
C += tx2 + ibx + __mul24 (ty2 + iby ,ldc);
#pragma unroll
for( int j1=0; j1<16; j1++)
{
#pragma unroll
for( int y=0; y<4; y++)
Axs[y] = Abs[tx2 + y*16][j1];
#pragma unroll
for( int y=0; y<4; y++)
Bxp[y]= Bb[j1][ty2 + y*16];
#pragma unroll
for( int x=0; x<4; x++)
#pragma unroll
for( int y=0; y<4; y++)
Cb[x*4+y] += Axs[x]*Bxp[y];
}
int gy = iby + ty2;
#pragma unroll
for( int y=0; y<4; y++, gy+=16)
{
int gx = ibx + tx2;
#pragma unroll
for(int x=0; x<4; x++, gx+=16)
{
if (gx < m && gy < n)
C[x*16] = alpha*Cb[y+x*4] + beta * C[x*16];
}
C+=ldc*16;
}
}
extern "C" __global__ void
fermiDgemm_v2_kernel_NT(double *C, const double *A, const double *B,
int m, int n, int k, int lda, int ldb,
int ldc, double alpha, double beta,
int offsetA, int offsetB)
{
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int iby = blockIdx.y * 64;
const int ibx = blockIdx.x * 64;
const int idt = ty * 64 + tx;
const int tx2= idt%16;
const int ty2= idt/16;
__shared__ double Bb[16][65];
__shared__ double Abs[64][17];
double xxA[4];
double xxB[4];
int trackA = offsetA + ibx +__mul24(ty2, lda) + tx2 ;
int trackB = offsetB + iby + tx2 + __mul24(ty2, ldb);
A+= trackA;
B += trackB;
int tll = ty2;
#pragma unroll
for(int y=0; y<4; y++)
Abs[tx2+ y*16][ty2] = /* (tll<k)* */ fetch_x_A(trackA + y*16);
#pragma unroll
for(int y=0; y<4; y++)
Bb[ty2][tx2+16*y] = /* (tll<k)* */ fetch_x_B(trackB+16*y);
__syncthreads();
double Axs[4];
double Bxp[4];
double Cb[16] = {0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0};
int k1;
for(k1=0; k1<(k-16); k1+=16)
{
tll += 16;
A += lda *16 ;
B += 16*ldb;
trackA+=16*lda ;
trackB+=16*ldb;
#pragma unroll
for( int y=0; y<4; y++)
xxA[y] = /* (tll<k)* */ fetch_x_A(trackA + y*16);
#pragma unroll
for( int y=0; y<4; y++)
xxB[y] = /* (tll<k)* */ fetch_x_B( trackB + 16*y);
#pragma unroll
for( int j1=0;j1<16;j1++)
{
#pragma unroll
for( int y=0; y<4; y++)
Bxp[y]= Bb[j1][ty2 + y*16];
#pragma unroll
for( int y=0; y<4; y++)
Axs[y] = Abs[tx2 + y*16][j1] ;
#pragma unroll
for( int x=0; x<4; x++)
#pragma unroll
for( int y=0; y<4; y++)
Cb[x*4+y] += Axs[x]*Bxp[y];
}
__syncthreads();
#pragma unroll
for( int y=0; y<4; y++)
Abs[tx2 + y*16][ty2] = xxA[y];
#pragma unroll
for( int y=0; y<4; y++)
Bb[ty2][tx2+y*16] = xxB[y];
__syncthreads();
}
C += tx2 + ibx + __mul24(ty2 + iby ,ldc);
#pragma unroll
for(int j1=0; j1<16; j1++)
{
#pragma unroll
for( int y=0; y<4; y++)
Bxp[y] = Bb[j1][ty2 + y*16];
#pragma unroll
for( int y=0; y<4; y++)
Axs[y] = Abs[tx2 + y*16][j1] ;
#pragma unroll
for( int x=0; x<4; x++)
#pragma unroll
for( int y=0;y<4;y++)
Cb[x*4+y] += Axs[x]*Bxp[y];
}
int gy = iby + ty2;
#pragma unroll
for( int y=0; y<4; y++, gy+=16)
{
int gx = ibx + tx2;
#pragma unroll
for(int x=0; x<4; x++, gx+=16)
{
if (gx < m && gy < n)
C[x*16] = alpha*Cb[y + x*4] + beta * C[x*16];
}
C+=ldc*16;
}
}
extern "C" void
magmablas_dgemm_fermi( char TRANSA, char TRANSB, int m , int n , int k ,
double alpha, const double *A, int lda,
const double *B, int ldb,
double beta, double *C, int ldc )
{
/* -- MAGMA (version 1.3.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
November 2012
Purpose
=======
DGEMM performs one of the matrix-matrix operations
C := alpha*op( A )*op( B ) + beta*C,
where op( X ) is one of
op( X ) = X or op( X ) = X',
alpha and beta are scalars, and A, B and C are matrices, with op( A )
an m by k matrix, op( B ) a k by n matrix and C an m by n matrix.
Parameters
==========
TRANSA - CHARACTER*1.
On entry, TRANSA specifies the form of op( A ) to be used in
the matrix multiplication as follows:
TRANSA = 'N' or 'n', op( A ) = A.
TRANSA = 'T' or 't', op( A ) = A'.
TRANSA = 'C' or 'c', op( A ) = A'.
Unchanged on exit.
TRANSB - CHARACTER*1.
On entry, TRANSB specifies the form of op( B ) to be used in
the matrix multiplication as follows:
TRANSB = 'N' or 'n', op( B ) = B.
TRANSB = 'T' or 't', op( B ) = B'.
TRANSB = 'C' or 'c', op( B ) = B'.
Unchanged on exit.
M - INTEGER.
On entry, M specifies the number of rows of the matrix
op( A ) and of the matrix C. M must be at least zero.
Unchanged on exit.
N - INTEGER.
On entry, N specifies the number of columns of the matrix
op( B ) and the number of columns of the matrix C. N must be
at least zero.
Unchanged on exit.
K - INTEGER.
On entry, K specifies the number of columns of the matrix
op( A ) and the number of rows of the matrix op( B ). K must
be at least zero.
Unchanged on exit.
ALPHA - DOUBLE PRECISION.
On entry, ALPHA specifies the scalar alpha.
Unchanged on exit.
A - DOUBLE PRECISION array of DIMENSION ( LDA, ka ), where ka is
k when TRANSA = 'N' or 'n', and is m otherwise.
Before entry with TRANSA = 'N' or 'n', the leading m by k
part of the array A must contain the matrix A, otherwise
the leading k by m part of the array A must contain the
matrix A.
Unchanged on exit.
LDA - INTEGER.
On entry, LDA specifies the first dimension of A as declared
in the calling (sub) program. When TRANSA = 'N' or 'n' then
LDA must be at least max( 1, m ), otherwise LDA must be at
least max( 1, k ).
Unchanged on exit.
B - DOUBLE PRECISION array of DIMENSION ( LDB, kb ), where kb is
n when TRANSB = 'N' or 'n', and is k otherwise.
Before entry with TRANSB = 'N' or 'n', the leading k by n
part of the array B must contain the matrix B, otherwise
the leading n by k part of the array B must contain the
matrix B.
Unchanged on exit.
LDB - INTEGER.
On entry, LDB specifies the first dimension of B as declared
in the calling (sub) program. When TRANSB = 'N' or 'n' then
LDB must be at least max( 1, k ), otherwise LDB must be at
least max( 1, n ).
Unchanged on exit.
BETA - DOUBLE PRECISION.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then C need not be set on input.
Unchanged on exit.
C - DOUBLE PRECISION array of DIMENSION ( LDC, n ).
Before entry, the leading m by n part of the array C must
contain the matrix C, except when beta is zero, in which
case C need not be set on entry.
On exit, the array C is overwritten by the m by n matrix
( alpha*op( A )*op( B ) + beta*C ).
LDC - INTEGER.
On entry, LDC specifies the first dimension of C as declared
in the calling (sub) program. LDC must be at least
max( 1, m ).
Unchanged on exit.
===================================================================== */
if (m<=0 || n<=0 || k<=0)
return;
size_t offsetA = 0;
size_t offsetB = 0;
int TransA = 1, TransB = 1;
if (TRANSA == 'N' || TRANSA == 'n')
TransA = 0;
if (TRANSB == 'N' || TRANSB == 'n')
TransB = 0;
size_t sizeA = (size_t) lda * (size_t) (!TransA ? k : m);
size_t sizeB = (size_t) ldb * (size_t) (!TransB ? n : k);
// size_t CUBLAS_MAX_1DBUF_SIZE = ((1 << 27) - 512) / 2;
size_t CUBLAS_MAX_1DBUF_SIZE = ((1 << 27) - 512);
if (sizeA>=CUBLAS_MAX_1DBUF_SIZE ||
sizeB>=CUBLAS_MAX_1DBUF_SIZE )
{
// printf("Exceeding texuture limit (CUBLAS_MAX_1DBUF_SIZE=%ld), using cublasSgemm\n", CUBLAS_MAX_1DBUF_SIZE);
cublasDgemm(TRANSA, TRANSB, m, n, k, alpha,
A, lda, B, ldb,
beta, C, ldc);
return;
}
cudaError_t errt;
errt = cudaBindTexture(&offsetA, tex_x_double_A, (int2 *)A,
sizeA * sizeof(A[0]));
if( errt != cudaSuccess)
{
printf("can not bind to texture \n");
return;
}
errt = cudaBindTexture(&offsetB, tex_x_double_B, (int2 *)B,
sizeB * sizeof(B[0]));
if( errt != cudaSuccess)
{
printf("can not bind to texture \n");
return;
}
dim3 threads( 64, 4 );
dim3 grid(m/(64)+(m%(64)!=0),n/(64)+(n%(64)!=0));
offsetA = offsetA/sizeof(A[0]);
offsetB = offsetB/sizeof(B[0]);
if ( TransB )
if ( !TransA )
fermiDgemm_v2_kernel_NT<<< grid, threads, 0, magma_stream >>>(C, A, B, m, n, k, lda, ldb,
ldc, alpha, beta,
(int)offsetA, (int)offsetB);
else
fermiDgemm_v2_kernel_TT<<< grid, threads, 0, magma_stream >>>(C, A, B, m, n, k, lda, ldb,
ldc, alpha, beta,
(int)offsetA, (int)offsetB);
else
if ( !TransA )
fermiDgemm_v2_kernel_NN<<< grid, threads, 0, magma_stream >>>(C, A, B, m, n, k, lda, ldb,
ldc, alpha, beta,
(int)offsetA, (int)offsetB);
else
fermiDgemm_v2_kernel_TN<<< grid, threads, 0, magma_stream >>>(C, A, B, m, n, k, lda, ldb,
ldc, alpha, beta,
(int)offsetA, (int)offsetB);
cudaUnbindTexture ( tex_x_double_A ) ;
cudaUnbindTexture ( tex_x_double_B ) ;
}
|
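The pair above is MAGMA 1.3.0's Fermi DGEMM (blk_M=64, blk_N=64, blk_K=16) in its HIP and CUDA forms. For orientation only, here is a minimal host-side sketch of how the entry point declared above could be driven; it assumes the MAGMA headers and library from this file are available, and the matrix sizes, the fill step, and the lack of error checking are hypothetical placeholders rather than part of the dataset entry.

```cuda
// Hypothetical driver: C := alpha*op(A)*op(B) + beta*C, column-major, 'N','N' case.
#include <cuda_runtime.h>
#include "common_magma.h"   // assumed to declare magmablas_dgemm as defined above

int main()
{
    const int m = 256, n = 256, k = 256;          // example sizes (made up)
    const int lda = m, ldb = k, ldc = m;          // A is m-by-k, B is k-by-n, C is m-by-n
    double *d_A, *d_B, *d_C;
    cudaMalloc(&d_A, sizeof(double) * (size_t)lda * k);
    cudaMalloc(&d_B, sizeof(double) * (size_t)ldb * n);
    cudaMalloc(&d_C, sizeof(double) * (size_t)ldc * n);
    /* ... copy host data into d_A, d_B, d_C ... */
    magmablas_dgemm('N', 'N', m, n, k,
                    1.0, d_A, lda,
                         d_B, ldb,
                    0.0, d_C, ldc);               // dispatches to the NN kernel above
    cudaDeviceSynchronize();
    cudaFree(d_A); cudaFree(d_B); cudaFree(d_C);
    return 0;
}
```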
208cebdf74b9f98a4fdf233aa4ea095bd4d61204.hip | // !!! This is a file automatically generated by hipify!!!
#include "stdio.h"
#include <hip/hip_runtime.h>
#include "../CudaHelper.h"
#include <assert.h>
#include <sys/time.h> // time
#include <stdlib.h> // (s)rand
#include <math.h> // sqrt
struct Float3
{
float x;
float y;
float z;
};
const unsigned int COUNT = 1 << 12; // 4K
void FindClosestCPU(int* indexArray,Float3* pointArray, unsigned int count)
{
assert(NULL != indexArray);
assert(NULL != pointArray);
assert(count >= 2);
float closestDistance = 10000000.0f;
float curPointDistance = 0.0f;
for(int curPoint = 0; curPoint < count; curPoint++)
{
for(int i = 0; i < count; i++)
{
if( i == curPoint)
{
continue;
}
curPointDistance = sqrt((pointArray[curPoint].x - pointArray[i].x) * (pointArray[curPoint].x - pointArray[i].x) +
(pointArray[curPoint].y - pointArray[i].y) * (pointArray[curPoint].y - pointArray[i].y) +
(pointArray[curPoint].z - pointArray[i].z) * (pointArray[curPoint].z - pointArray[i].z));
/*
* curPointDistance = (pointArray[curPoint].x - pointArray[i].x) * (pointArray[curPoint].x - pointArray[i].x) +
* (pointArray[curPoint].y - pointArray[i].y) * (pointArray[curPoint].y - pointArray[i].y) +
* (pointArray[curPoint].z - pointArray[i].z) * (pointArray[curPoint].z - pointArray[i].z);
*/
if(curPointDistance < closestDistance)
{
closestDistance = curPointDistance;
indexArray[curPoint] = i;
}
}
}
}
int main(int argv, char* argc[])
{
int *h_indexOfClosestPoint = new int[COUNT];
assert(NULL != h_indexOfClosestPoint);
Float3 *h_pointArray = new Float3[COUNT];
assert(NULL != h_pointArray);
srand((int)time(NULL));
for(int i = 0; i < COUNT; i++)
{
h_pointArray[i].x = (float)(rand()%1000);
h_pointArray[i].y = (float)(rand()%1000);
h_pointArray[i].z = (float)(rand()%1000);
}
struct timeval start_time;
struct timeval stop_time;
for(int i = 0; i < 10; i++)
{
gettimeofday(&start_time, 0);
FindClosestCPU(h_indexOfClosestPoint, h_pointArray, COUNT);
gettimeofday(&stop_time, 0);
float fElapsedTimeViaCPU = 1000.0 * (stop_time.tv_sec - start_time.tv_sec) + (0.001 * (stop_time.tv_usec - start_time.tv_usec));
printf("%d elapsed time via CPU is %f ms.\n", i , fElapsedTimeViaCPU);
}
// print first 0~10 index point
for (int i = 0; i< 10; i++)
{
printf("index [%d](point is [%f,%f,%f]) is closest to index [%d](point is [%f,%f,%f])\n",
i, h_pointArray[i].x, h_pointArray[i].y, h_pointArray[i].z,
h_indexOfClosestPoint[i], h_pointArray[h_indexOfClosestPoint[i]].x, h_pointArray[h_indexOfClosestPoint[i]].y,
h_pointArray[h_indexOfClosestPoint[i]].z);
}
if(NULL != h_indexOfClosestPoint) { delete []h_indexOfClosestPoint;}
if(NULL != h_pointArray) { delete []h_pointArray;}
}
| 208cebdf74b9f98a4fdf233aa4ea095bd4d61204.cu | #include "stdio.h"
#include <cuda_runtime.h>
#include "../CudaHelper.h"
#include <assert.h>
#include <sys/time.h> // time
#include <stdlib.h> // (s)rand
#include <math.h> // sqrt
struct Float3
{
float x;
float y;
float z;
};
const unsigned int COUNT = 1 << 12; // 4K
void FindClosestCPU(int* indexArray,Float3* pointArray, unsigned int count)
{
assert(NULL != indexArray);
assert(NULL != pointArray);
assert(count >= 2);
float closestDistance = 10000000.0f;
float curPointDistance = 0.0f;
for(int curPoint = 0; curPoint < count; curPoint++)
{
for(int i = 0; i < count; i++)
{
if( i == curPoint)
{
continue;
}
curPointDistance = sqrt((pointArray[curPoint].x - pointArray[i].x) * (pointArray[curPoint].x - pointArray[i].x) +
(pointArray[curPoint].y - pointArray[i].y) * (pointArray[curPoint].y - pointArray[i].y) +
(pointArray[curPoint].z - pointArray[i].z) * (pointArray[curPoint].z - pointArray[i].z));
/*
* curPointDistance = (pointArray[curPoint].x - pointArray[i].x) * (pointArray[curPoint].x - pointArray[i].x) +
* (pointArray[curPoint].y - pointArray[i].y) * (pointArray[curPoint].y - pointArray[i].y) +
* (pointArray[curPoint].z - pointArray[i].z) * (pointArray[curPoint].z - pointArray[i].z);
*/
if(curPointDistance < closestDistance)
{
closestDistance = curPointDistance;
indexArray[curPoint] = i;
}
}
}
}
int main(int argv, char* argc[])
{
int *h_indexOfClosestPoint = new int[COUNT];
assert(NULL != h_indexOfClosestPoint);
Float3 *h_pointArray = new Float3[COUNT];
assert(NULL != h_pointArray);
srand((int)time(NULL));
for(int i = 0; i < COUNT; i++)
{
h_pointArray[i].x = (float)(rand()%1000);
h_pointArray[i].y = (float)(rand()%1000);
h_pointArray[i].z = (float)(rand()%1000);
}
struct timeval start_time;
struct timeval stop_time;
for(int i = 0; i < 10; i++)
{
gettimeofday(&start_time, 0);
FindClosestCPU(h_indexOfClosestPoint, h_pointArray, COUNT);
gettimeofday(&stop_time, 0);
float fElapsedTimeViaCPU = 1000.0 * (stop_time.tv_sec - start_time.tv_sec) + (0.001 * (stop_time.tv_usec - start_time.tv_usec));
printf("%d elapsed time via CPU is %f ms.\n", i , fElapsedTimeViaCPU);
}
// print first 0~10 index point
for (int i = 0; i< 10; i++)
{
printf("index [%d](point is [%f,%f,%f]) is closest to index [%d](point is [%f,%f,%f])\n",
i, h_pointArray[i].x, h_pointArray[i].y, h_pointArray[i].z,
h_indexOfClosestPoint[i], h_pointArray[h_indexOfClosestPoint[i]].x, h_pointArray[h_indexOfClosestPoint[i]].y,
h_pointArray[h_indexOfClosestPoint[i]].z);
}
if(NULL != h_indexOfClosestPoint) { delete []h_indexOfClosestPoint;}
if(NULL != h_pointArray) { delete []h_pointArray;}
}
|
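The entry above only benchmarks the CPU brute-force nearest-neighbour search (FindClosestCPU), even though it pulls in the CUDA runtime, so it reads as the baseline half of a GPU comparison. Purely as an illustration, a minimal sketch of the same O(n^2) search as a kernel follows; the kernel name, the launch shape, and the per-query reset of the closest distance (which the CPU loop above omits) are my additions, not part of the dataset entry.

```cuda
// Hypothetical GPU counterpart of FindClosestCPU above: one thread per query point.
// Reuses the Float3 struct defined in the file above.
__global__ void FindClosestGPU(int* indexArray, const Float3* pointArray, unsigned int count)
{
    unsigned int curPoint = blockIdx.x * blockDim.x + threadIdx.x;
    if (curPoint >= count)
        return;
    float closestDistance = 3.402823e38f;          // re-initialised for every query point
    for (unsigned int i = 0; i < count; i++)
    {
        if (i == curPoint)
            continue;
        float dx = pointArray[curPoint].x - pointArray[i].x;
        float dy = pointArray[curPoint].y - pointArray[i].y;
        float dz = pointArray[curPoint].z - pointArray[i].z;
        float d2 = dx*dx + dy*dy + dz*dz;          // squared distance; sqrt not needed for an argmin
        if (d2 < closestDistance)
        {
            closestDistance = d2;
            indexArray[curPoint] = i;
        }
    }
}
// Launch sketch: FindClosestGPU<<<(COUNT + 255) / 256, 256>>>(d_index, d_points, COUNT);
```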
8421581a1fdc3b648275f9921f3dcde33d6040c9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void kernel_renderRGBA2Surface(hipSurfaceObject_t surface, dev_t *src, int pitch, int pixel_w, int pixel_h)
{
unsigned int dim_x = blockDim.x * blockIdx.x + threadIdx.x;
unsigned int dim_y = blockDim.y * blockIdx.y + threadIdx.y;
if (dim_x < pixel_w && dim_y < pixel_h)
{
u_char r,g,b;
r = *((u_char*)src + dim_y * pitch + dim_x * 4);
g = *((u_char*)src + dim_y * pitch + dim_x * 4 + 1);
b = *((u_char*)src + dim_y * pitch + dim_x * 4 + 2);
uchar4 data = make_uchar4(r, g, b, 0xff);
surf2Dwrite(data, surface, dim_x * sizeof(uchar4), dim_y);
}
} | 8421581a1fdc3b648275f9921f3dcde33d6040c9.cu | #include "includes.h"
__global__ void kernel_renderRGBA2Surface(cudaSurfaceObject_t surface, dev_t *src, int pitch, int pixel_w, int pixel_h)
{
unsigned int dim_x = blockDim.x * blockIdx.x + threadIdx.x;
unsigned int dim_y = blockDim.y * blockIdx.y + threadIdx.y;
if (dim_x < pixel_w && dim_y < pixel_h)
{
u_char r,g,b;
r = *((u_char*)src + dim_y * pitch + dim_x * 4);
g = *((u_char*)src + dim_y * pitch + dim_x * 4 + 1);
b = *((u_char*)src + dim_y * pitch + dim_x * 4 + 2);
uchar4 data = make_uchar4(r, g, b, 0xff);
surf2Dwrite(data, surface, dim_x * sizeof(uchar4), dim_y);
}
} |
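kernel_renderRGBA2Surface above copies an RGBA source buffer into a CUDA surface with one thread per pixel, forcing alpha to 0xff. A possible launch wrapper is sketched below; the 16x16 block shape is an arbitrary choice, and `dev_t` is whatever the file's includes.h defines.

```cuda
// Hypothetical launch wrapper for kernel_renderRGBA2Surface above.
void renderRGBA2Surface(cudaSurfaceObject_t surface, dev_t *src, int pitch, int pixel_w, int pixel_h)
{
    dim3 block(16, 16);                              // arbitrary 2D tile
    dim3 grid((pixel_w + block.x - 1) / block.x,     // cover every pixel
              (pixel_h + block.y - 1) / block.y);
    kernel_renderRGBA2Surface<<<grid, block>>>(surface, src, pitch, pixel_w, pixel_h);
}
```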
2fee0619627842f33206809c13555feac0df6b25.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* \file update_k.cu
* \brief Kernel declarations for optical flow update computation.
* \copyright 2015, Juan David Adarve, ANU. See AUTHORS for more details
* \license 3-clause BSD, see LICENSE for more details
*/
#include "flowfilter/gpu/device/image_k.h"
#include "flowfilter/gpu/device/update_k.h"
namespace flowfilter {
namespace gpu {
__global__ void flowUpdate_k(gpuimage_t<float> newImage,
gpuimage_t<float2> newImageGradient,
gpuimage_t<float> oldImage, gpuimage_t<float2> oldFlow,
gpuimage_t<float> imageUpdated, gpuimage_t<float2> flowUpdated,
const float gamma, const float maxflow) {
const int height = flowUpdated.height;
const int width = flowUpdated.width;
// pixel coordinate
const int2 pix = make_int2(blockIdx.x*blockDim.x + threadIdx.x,
blockIdx.y*blockDim.y + threadIdx.y);
if(pix.x >= width || pix.y >= height) {
return;
}
// read elements from the different arrays
float2 a1 = *coordPitch(newImageGradient, pix);
float a0 = *coordPitch(newImage, pix);
float a0old = *coordPitch(oldImage, pix);
float2 ofOld = *coordPitch(oldFlow, pix);
//#################################
// FLOW UPDATE
//#################################
// temporal derivative
float Yt = a0old - a0;
float ax2 = a1.x*a1.x;
float ay2 = a1.y*a1.y;
// elements of the adjucate matrix of M
float N00 = gamma + ay2;
float N01 = -a1.x*a1.y;
float N10 = N01;
float N11 = gamma + ax2;
// reciprocal determinant of M
float rdetM = 1.0f / (gamma*(gamma + ax2 + ay2));
// q vector components
float qx = gamma*ofOld.x + a1.x*Yt;
float qy = gamma*ofOld.y + a1.y*Yt;
// computes the updated optical flow
float2 ofNew = make_float2( (N00*qx + N01*qy)*rdetM,
(N10*qx + N11*qy)*rdetM);
// truncates the flow to lie in its allowed interval
ofNew.x = max(-maxflow, min(ofNew.x, maxflow));
ofNew.y = max(-maxflow, min(ofNew.y, maxflow));
// sanitize the output
ofNew.x = isinf(ofNew.x) + isnan(ofNew.x) > 0? 0.0f : ofNew.x;
ofNew.y = isinf(ofNew.y) + isnan(ofNew.y) > 0? 0.0f : ofNew.y;
//#################################
// PACK RESULTS
//#################################
*coordPitch(flowUpdated, pix) = ofNew;
*coordPitch(imageUpdated, pix) = a0;
}
__global__ void deltaFlowUpdate_k(gpuimage_t<float> newImage,
gpuimage_t<float2> newImageGradient,
gpuimage_t<float> oldImage,
gpuimage_t<float2> oldDeltaFlow,
hipTextureObject_t oldFlowTexture,
gpuimage_t<float> imageUpdated,
gpuimage_t<float2> deltaFlowUpdated,
gpuimage_t<float2> flowUpdated,
const float gamma, const float maxflow) {
const int height = flowUpdated.height;
const int width = flowUpdated.width;
// pixel coordinate
const int2 pix = make_int2(blockIdx.x*blockDim.x + threadIdx.x,
blockIdx.y*blockDim.y + threadIdx.y);
if(pix.x >= width || pix.y >= height) {
return;
}
// read elements from the different arrays
float2 a1 = *coordPitch(newImageGradient, pix);
float a0 = *coordPitch(newImage, pix);
float a0old = *coordPitch(oldImage, pix);
float2 deltaFlowOld = *coordPitch(oldDeltaFlow, pix);
//#################################
// FLOW UPDATE
//#################################
// temporal derivative
float Yt = a0old - a0;
float ax2 = a1.x*a1.x;
float ay2 = a1.y*a1.y;
// elements of the adjucate matrix of M
float N00 = gamma + ay2;
float N01 = -a1.x*a1.y;
float N10 = N01;
float N11 = gamma + ax2;
// reciprocal determinant of M
float rdetM = 1.0f / (gamma*(gamma + ax2 + ay2));
// q vector components
float qx = gamma*deltaFlowOld.x + a1.x*Yt;
float qy = gamma*deltaFlowOld.y + a1.y*Yt;
// computes updated optical flow
float2 dFlowNew = make_float2( (N00*qx + N01*qy)*rdetM,
(N10*qx + N11*qy)*rdetM);
// sanitize output
dFlowNew.x = isinf(dFlowNew.x) + isnan(dFlowNew.x) > 0? 0.0f : dFlowNew.x;
dFlowNew.y = isinf(dFlowNew.y) + isnan(dFlowNew.y) > 0? 0.0f : dFlowNew.y;
// truncates dflow to lie in its allowed interval
dFlowNew.x = max(-0.5f*maxflow, min(dFlowNew.x, 0.5f*maxflow));
dFlowNew.y = max(-0.5f*maxflow, min(dFlowNew.y, 0.5f*maxflow));
//#################################
// OPTICAL FLOW COMPUTATION
//#################################
// read upper level flow
// normalized texture coordinates
float u = (float)pix.x / (float)(width -1);
float v = (float)pix.y / (float)(height -1);
// linear interpolation of flow value
float2 fup = tex2D<float2>(oldFlowTexture, u, v);
float2 flowUp = make_float2(2.0*fup.x, 2.0*fup.y);
// update upsampled flow from top level
float2 flowNew = make_float2(dFlowNew.x + flowUp.x,
dFlowNew.y + flowUp.y);
// truncates flow to lie in its allowed interval
flowNew.x = max(-maxflow, min(flowNew.x, maxflow));
flowNew.y = max(-maxflow, min(flowNew.y, maxflow));
//#################################
// PACK RESULTS
//#################################
*coordPitch(deltaFlowUpdated, pix) = dFlowNew;
*coordPitch(flowUpdated, pix) = flowNew;
*coordPitch(imageUpdated, pix) = a0;
}
}; // namespace gpu
}; // namespace flowfilter
| 2fee0619627842f33206809c13555feac0df6b25.cu | /**
* \file update_k.cu
* \brief Kernel declarations for optical flow update computation.
* \copyright 2015, Juan David Adarve, ANU. See AUTHORS for more details
* \license 3-clause BSD, see LICENSE for more details
*/
#include "flowfilter/gpu/device/image_k.h"
#include "flowfilter/gpu/device/update_k.h"
namespace flowfilter {
namespace gpu {
__global__ void flowUpdate_k(gpuimage_t<float> newImage,
gpuimage_t<float2> newImageGradient,
gpuimage_t<float> oldImage, gpuimage_t<float2> oldFlow,
gpuimage_t<float> imageUpdated, gpuimage_t<float2> flowUpdated,
const float gamma, const float maxflow) {
const int height = flowUpdated.height;
const int width = flowUpdated.width;
// pixel coordinate
const int2 pix = make_int2(blockIdx.x*blockDim.x + threadIdx.x,
blockIdx.y*blockDim.y + threadIdx.y);
if(pix.x >= width || pix.y >= height) {
return;
}
// read elements from the different arrays
float2 a1 = *coordPitch(newImageGradient, pix);
float a0 = *coordPitch(newImage, pix);
float a0old = *coordPitch(oldImage, pix);
float2 ofOld = *coordPitch(oldFlow, pix);
//#################################
// FLOW UPDATE
//#################################
// temporal derivative
float Yt = a0old - a0;
float ax2 = a1.x*a1.x;
float ay2 = a1.y*a1.y;
// elements of the adjucate matrix of M
float N00 = gamma + ay2;
float N01 = -a1.x*a1.y;
float N10 = N01;
float N11 = gamma + ax2;
// reciprocal determinant of M
float rdetM = 1.0f / (gamma*(gamma + ax2 + ay2));
// q vector components
float qx = gamma*ofOld.x + a1.x*Yt;
float qy = gamma*ofOld.y + a1.y*Yt;
// computes the updated optical flow
float2 ofNew = make_float2( (N00*qx + N01*qy)*rdetM,
(N10*qx + N11*qy)*rdetM);
// truncates the flow to lie in its allowed interval
ofNew.x = max(-maxflow, min(ofNew.x, maxflow));
ofNew.y = max(-maxflow, min(ofNew.y, maxflow));
// sanitize the output
ofNew.x = isinf(ofNew.x) + isnan(ofNew.x) > 0? 0.0f : ofNew.x;
ofNew.y = isinf(ofNew.y) + isnan(ofNew.y) > 0? 0.0f : ofNew.y;
//#################################
// PACK RESULTS
//#################################
*coordPitch(flowUpdated, pix) = ofNew;
*coordPitch(imageUpdated, pix) = a0;
}
__global__ void deltaFlowUpdate_k(gpuimage_t<float> newImage,
gpuimage_t<float2> newImageGradient,
gpuimage_t<float> oldImage,
gpuimage_t<float2> oldDeltaFlow,
cudaTextureObject_t oldFlowTexture,
gpuimage_t<float> imageUpdated,
gpuimage_t<float2> deltaFlowUpdated,
gpuimage_t<float2> flowUpdated,
const float gamma, const float maxflow) {
const int height = flowUpdated.height;
const int width = flowUpdated.width;
// pixel coordinate
const int2 pix = make_int2(blockIdx.x*blockDim.x + threadIdx.x,
blockIdx.y*blockDim.y + threadIdx.y);
if(pix.x >= width || pix.y >= height) {
return;
}
// read elements from the different arrays
float2 a1 = *coordPitch(newImageGradient, pix);
float a0 = *coordPitch(newImage, pix);
float a0old = *coordPitch(oldImage, pix);
float2 deltaFlowOld = *coordPitch(oldDeltaFlow, pix);
//#################################
// FLOW UPDATE
//#################################
// temporal derivative
float Yt = a0old - a0;
float ax2 = a1.x*a1.x;
float ay2 = a1.y*a1.y;
// elements of the adjucate matrix of M
float N00 = gamma + ay2;
float N01 = -a1.x*a1.y;
float N10 = N01;
float N11 = gamma + ax2;
// reciprocal determinant of M
float rdetM = 1.0f / (gamma*(gamma + ax2 + ay2));
// q vector components
float qx = gamma*deltaFlowOld.x + a1.x*Yt;
float qy = gamma*deltaFlowOld.y + a1.y*Yt;
// computes updated optical flow
float2 dFlowNew = make_float2( (N00*qx + N01*qy)*rdetM,
(N10*qx + N11*qy)*rdetM);
// sanitize output
dFlowNew.x = isinf(dFlowNew.x) + isnan(dFlowNew.x) > 0? 0.0f : dFlowNew.x;
dFlowNew.y = isinf(dFlowNew.y) + isnan(dFlowNew.y) > 0? 0.0f : dFlowNew.y;
// truncates dflow to lie in its allowed interval
dFlowNew.x = max(-0.5f*maxflow, min(dFlowNew.x, 0.5f*maxflow));
dFlowNew.y = max(-0.5f*maxflow, min(dFlowNew.y, 0.5f*maxflow));
//#################################
// OPTICAL FLOW COMPUTATION
//#################################
// read upper level flow
// normalized texture coordinates
float u = (float)pix.x / (float)(width -1);
float v = (float)pix.y / (float)(height -1);
// linear interpolation of flow value
float2 fup = tex2D<float2>(oldFlowTexture, u, v);
float2 flowUp = make_float2(2.0*fup.x, 2.0*fup.y);
// update upsampled flow from top level
float2 flowNew = make_float2(dFlowNew.x + flowUp.x,
dFlowNew.y + flowUp.y);
// truncates flow to lie in its allowed interval
flowNew.x = max(-maxflow, min(flowNew.x, maxflow));
flowNew.y = max(-maxflow, min(flowNew.y, maxflow));
//#################################
// PACK RESULTS
//#################################
*coordPitch(deltaFlowUpdated, pix) = dFlowNew;
*coordPitch(flowUpdated, pix) = flowNew;
*coordPitch(imageUpdated, pix) = a0;
}
}; // namespace gpu
}; // namespace flowfilter
|
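The update in flowUpdate_k above (and in the delta-flow variant) is the closed-form solution of a 2x2 regularized linear system; restated with the kernel's own variable names, where a_1 = (a_x, a_y) is the new image gradient and Y_t = a0old - a0:

```latex
M = \gamma I + a_1 a_1^{\top}
  = \begin{pmatrix} \gamma + a_x^2 & a_x a_y \\ a_x a_y & \gamma + a_y^2 \end{pmatrix},
\qquad
q = \gamma\,\mathrm{ofOld} + a_1 Y_t,
\qquad
\det M = \gamma\,(\gamma + a_x^2 + a_y^2),

\mathrm{ofNew} = M^{-1} q
  = \frac{1}{\det M}
    \begin{pmatrix} \gamma + a_y^2 & -a_x a_y \\ -a_x a_y & \gamma + a_x^2 \end{pmatrix} q .
```

In other words, the kernel solves (gamma*I + a1*a1^T)*ofNew = gamma*ofOld + a1*Yt via the adjugate (N00..N11 and rdetM in the code) and then clamps each component to [-maxflow, maxflow] and sanitizes NaN/Inf values.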
b8c7be0c579dc01b4b0f287fd0402cef6fc8fd7f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "neural_net.h"
#include <time.h>
#include <cstdio>
#include <string>
template <typename T>
__global__ void softmaxLossBackProp(int *y, T *SO, T *dSO, int batch_size, int output_size, float eps) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= batch_size)
return;
int cur_class = static_cast<int>(y[i]);
dSO[i * output_size + cur_class] = -1 / (SO[i * output_size + cur_class] * batch_size + eps);
}
template <typename T>
__global__ void computeSoftmaxLoss(T *O, int *y, float *loss, int batch_size, int num_classes, float eps) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= batch_size)
return;
loss[i] = -logf(O[i * num_classes + y[i]] + eps);
}
template <typename T>
__global__ void inferClass(T *O, int *pred_y, int batch_size, int num_classes) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= batch_size)
return;
T max = O[i * num_classes];
int index = 0;
for (int j = 1; j < num_classes; j++) {
if (O[i * num_classes + j] > max) {
max = O[i * num_classes + j];
index = j;
}
}
pred_y[i] = index;
}
float NeuralNet::computeLoss() {
if (layer_type[num_layers - 1] == SOFTMAX) {
if (data_type == CUDNN_DATA_FLOAT)
hipLaunchKernelGGL(( computeSoftmaxLoss<float>), dim3(ceil(1.0 * batch_size / BW)), dim3(BW), 0, 0, (float *)layer_input[num_layers], this->y, loss, batch_size, num_classes, softmax_eps);
else if (data_type == CUDNN_DATA_DOUBLE)
hipLaunchKernelGGL(( computeSoftmaxLoss<double>), dim3(ceil(1.0 * batch_size / BW)), dim3(BW), 0, 0, (double *)layer_input[num_layers], this->y, loss, batch_size, num_classes, softmax_eps);
}
checkCudaErrors(hipMemcpy(h_loss, loss, batch_size * sizeof(float), hipMemcpyDeviceToHost));
float total_loss = 0.0;
for (int i = 0; i < batch_size; i++)
total_loss += h_loss[i];
return total_loss / batch_size;
}
void NeuralNet::compareOutputCorrect(int *correct_count, int *y) {
*correct_count = 0;
if (data_type == CUDNN_DATA_FLOAT) {
float *typecast_O = (float *)layer_input[num_layers - 1];
hipLaunchKernelGGL(( inferClass<float>), dim3(ceil(1.0 * batch_size / BW)), dim3(BW), 0, 0, typecast_O, pred_y, batch_size, num_classes);
checkCudaErrors(hipMemcpy(h_pred_y, pred_y, batch_size * sizeof(int), hipMemcpyDeviceToHost));
for (int i = 0; i < batch_size; i++) {
if (h_pred_y[i] == y[i])
*correct_count = *correct_count + 1;
}
}
else if (data_type == CUDNN_DATA_DOUBLE) {
double *typecast_O = (double *)layer_input[num_layers - 1];
hipLaunchKernelGGL(( inferClass<double>), dim3(ceil(1.0 * batch_size / BW)), dim3(BW), 0, 0, typecast_O, pred_y, batch_size, num_classes);
checkCudaErrors(hipMemcpy(h_pred_y, pred_y, batch_size * sizeof(int), hipMemcpyDeviceToHost));
for (int i = 0; i < batch_size; i++) {
if (h_pred_y[i] == y[i])
*correct_count = *correct_count + 1;
}
}
}
NeuralNet::NeuralNet(std::vector<LayerSpecifier> &layers, DataType data_type, int batch_size, TensorFormat tensor_format,
long long dropout_seed, float softmax_eps, float init_std_dev, vDNNType vdnn_type, vDNNConvAlgo vdnn_conv_algo,
UpdateRule update_rule) {
// ---------------------- vDNN start ----------------------
checkCudaErrors(hipStreamCreate(&stream_compute));
checkCudaErrors(hipStreamCreate(&stream_memory));
this->vdnn_type = vdnn_type;
this->vdnn_conv_algo = vdnn_conv_algo;
// ---------------------- vDNN end ------------------------
// create handle
checkCUDNN(cudnnCreate(&cudnn_handle));
checkCUDNN(cudnnSetStream(cudnn_handle, stream_compute));
checkCUBLAS(hipblasCreate(&cublas_handle));
checkCUBLAS(hipblasSetStream(cublas_handle, stream_compute));
checkCURAND(hiprandCreateGenerator(&curand_gen, HIPRAND_RNG_PSEUDO_DEFAULT));
checkCURAND(hiprandSetStream(curand_gen, stream_compute));
checkCudaErrors(hipMemGetInfo(&free_bytes, &total_bytes));
init_free_bytes = free_bytes;
std::cout << "Free bytes at start: " << free_bytes << std::endl;
pre_alloc_conv_derivative = false;
pre_alloc_fc_derivative = false;
pre_alloc_batch_norm_derivative = true;
if (vdnn_type == vDNN_NONE) {
pre_alloc_conv_derivative = true;
pre_alloc_fc_derivative = true;
pre_alloc_batch_norm_derivative = true;
}
if (data_type == DATA_FLOAT) {
this->data_type = CUDNN_DATA_FLOAT;
data_type_size = sizeof(float);
}
else if (data_type == DATA_DOUBLE) {
this->data_type = CUDNN_DATA_DOUBLE;
data_type_size = sizeof(double);
}
if (tensor_format == TENSOR_NCHW)
this->tensor_format = CUDNN_TENSOR_NCHW;
else if (tensor_format == TENSOR_NHWC)
this->tensor_format = CUDNN_TENSOR_NHWC;
this->batch_size = batch_size;
this->softmax_eps = softmax_eps;
this->init_std_dev = init_std_dev;
num_layers = layers.size();
// allocation of space for input to each layer
layer_input = (void **)malloc((num_layers + 1) * sizeof(void *));
layer_input_size = (int *)malloc((num_layers + 1) * sizeof(int));
dlayer_input = (void **)malloc((num_layers + 1) * sizeof(void *));
params = (void **)malloc(num_layers * sizeof(void *));
LayerDimension prev_output_size;
LayerDimension current_output_size;
for (int i = 0; i < num_layers; i++) {
layer_type.push_back(layers[i].type);
if (layers[i].type == CONV) {
ConvDescriptor *user_params = (ConvDescriptor *)layers[i].params;
params[i] = malloc(sizeof(ConvLayerParams));
((ConvLayerParams *)params[i])->initializeValues(cudnn_handle, user_params, this->data_type, batch_size, this->tensor_format,
data_type_size, current_output_size, update_rule);
}
else if (layers[i].type == FULLY_CONNECTED) {
FCDescriptor *user_params = (FCDescriptor *)layers[i].params;
params[i] = malloc(sizeof(FCLayerParams));
((FCLayerParams *)params[i])->initializeValues(user_params, batch_size, this->tensor_format, this->data_type,
current_output_size, update_rule);
}
else if (layers[i].type == DROPOUT) {
DropoutDescriptor *user_params = (DropoutDescriptor *)layers[i].params;
params[i] = malloc(sizeof(DropoutLayerParams));
((DropoutLayerParams *)params[i])->initializeValues(cudnn_handle, user_params, this->data_type, batch_size,
this->tensor_format, current_output_size);
}
else if (layers[i].type == BATCHNORM) {
BatchNormDescriptor *user_params = (BatchNormDescriptor *)layers[i].params;
params[i] = malloc(sizeof(BatchNormLayerParams));
((BatchNormLayerParams *)params[i])->initializeValues(user_params, this->data_type, this->tensor_format, batch_size,
current_output_size, update_rule);
}
else if (layers[i].type == POOLING) {
PoolingDescriptor *user_params = (PoolingDescriptor *)layers[i].params;
params[i] = malloc(sizeof(BatchNormLayerParams));
((PoolingLayerParams *)params[i])->initializeValues(user_params, this->data_type, this->tensor_format,
batch_size, current_output_size);
}
else if (layers[i].type == ACTV) {
ActivationDescriptor *user_params = (ActivationDescriptor *)layers[i].params;
params[i] = malloc(sizeof(ActivationLayerParams));
((ActivationLayerParams *)params[i])->initializeValues(user_params, this->data_type, this->tensor_format,
batch_size, current_output_size);
}
else if (layers[i].type == SOFTMAX) {
SoftmaxDescriptor *user_params = (SoftmaxDescriptor *)layers[i].params;
params[i] = malloc(sizeof(SoftmaxLayerParams));
((SoftmaxLayerParams *)params[i])->initializeValues(user_params, this->data_type, this->tensor_format,
batch_size, current_output_size);
// std::cout << current_output_size.N << ' ' << current_output_size.C << current_output_size.H << current_output_size.W << std::endl;
}
if (i == 0) {
prev_output_size = current_output_size;
}
// incomplete - have to check flatten and check exact dimension
// else if (current_output_size.getTotalSize() != prev_output_size.getTotalSize()) {
// std::cout << "Layer " << i << " output and next layer's input size mismatch\n";
// exit(0);
// }
}
// ---------------------- vDNN start ----------------------
// allocate space in host memory for layers to be transferred
h_layer_input = (void **)malloc(num_layers * sizeof(void *));
to_offload = (bool *)malloc(num_layers * sizeof(bool));
prefetched = (bool *)malloc(num_layers * sizeof(bool));
// ---------------------- vDNN end ------------------------
checkCudaErrors(hipMemGetInfo(&free_bytes, &total_bytes));
std::cout << "Free bytes just before allocate space: " << free_bytes << std::endl;
// allocate space for parameters
// Exception BatchNorm - looks like it will take lots of space if only FC layers - space taken = size of one input
for (int i = 0; i < num_layers; i++) {
size_t input_size;
if (layers[i].type == CONV) {
ConvDescriptor *user_params = (ConvDescriptor *)layers[i].params;
((ConvLayerParams *)params[i])->allocateSpace(curand_gen, this->data_type, data_type_size, init_std_dev,
free_bytes, pre_alloc_conv_derivative);
input_size = batch_size * user_params->input_channels * user_params->input_h * user_params->input_w;
if (i == 0) {
input_channels = user_params->input_channels;
input_h = user_params->input_h;
input_w = user_params->input_w;
}
}
else if (layers[i].type == FULLY_CONNECTED) {
FCDescriptor *user_params = (FCDescriptor *)layers[i].params;
((FCLayerParams *)params[i])->allocateSpace(curand_gen, this->data_type, data_type_size, init_std_dev,
free_bytes, pre_alloc_fc_derivative);
input_size = batch_size * user_params->input_channels;
if (i == 0) {
input_channels = user_params->input_channels;
input_h = 1;
input_w = 1;
}
}
else if (layers[i].type == DROPOUT) {
DropoutDescriptor *user_params = (DropoutDescriptor *)layers[i].params;
((DropoutLayerParams *)params[i])->allocateSpace(free_bytes, cudnn_handle, user_params, dropout_seed);
input_size = batch_size * user_params->channels * user_params->h * user_params->w;
if (i == 0) {
input_channels = user_params->channels;
input_h = user_params->h;
input_w = user_params->w;
}
}
else if (layers[i].type == BATCHNORM) {
BatchNormDescriptor *user_params = (BatchNormDescriptor *)layers[i].params;
((BatchNormLayerParams *)params[i])->allocateSpace(this->data_type, data_type_size,
free_bytes, pre_alloc_batch_norm_derivative);
input_size = batch_size * user_params->channels * user_params->h * user_params->w;
if (i == 0) {
input_channels = user_params->channels;
input_h = user_params->h;
input_w = user_params->w;
}
}
else if (layers[i].type == POOLING) {
PoolingDescriptor *user_params = (PoolingDescriptor *)layers[i].params;
((PoolingLayerParams *)params[i])->allocateSpace(free_bytes);
input_size = batch_size * user_params->input_channels * user_params->input_h * user_params->input_w;
if (i == 0) {
input_channels = user_params->input_channels;
input_h = user_params->input_h;
input_w = user_params->input_w;
}
}
else if (layers[i].type == ACTV) {
ActivationDescriptor *user_params = (ActivationDescriptor *)layers[i].params;
((ActivationLayerParams *)params[i])->allocateSpace(free_bytes);
input_size = batch_size * user_params->channels * user_params->h * user_params->w;
if (i == 0) {
input_channels = user_params->channels;
input_h = user_params->h;
input_w = user_params->w;
}
}
else if (layers[i].type == SOFTMAX) {
SoftmaxDescriptor *user_params = (SoftmaxDescriptor *)layers[i].params;
((SoftmaxLayerParams *)params[i])->allocateSpace(free_bytes);
input_size = batch_size * user_params->channels * user_params->h * user_params->w;
// assuming this is last layer, allocate for next layer as well
// checkCudaErrors(hipMalloc(&layer_input[i + 1], input_size * data_type_size));
// checkCudaErrors(hipMalloc(&dlayer_input[i + 1], input_size * data_type_size));
layer_input_size[i + 1] = input_size;
if (i == 0) {
input_channels = user_params->channels;
input_h = user_params->h;
input_w = user_params->w;
}
if (i == num_layers - 1) {
num_classes = user_params->channels;
}
}
// do not allocate memory initially
// checkCudaErrors(hipMalloc(&layer_input[i], input_size * data_type_size));
// checkCudaErrors(hipMalloc(&dlayer_input[i], input_size * data_type_size));
// ---------------------- vDNN start ----------------------
layer_input_size[i] = input_size;
// ---------------------- vDNN end ------------------------
}
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipMemGetInfo(&free_bytes, &total_bytes));
std::cout << "Free bytes just after allocate space: " << free_bytes << std::endl;
// very small - could be allocated initially itself
checkCudaErrors(hipMalloc((void **)&y, batch_size * sizeof(int)));
checkCudaErrors(hipMalloc((void **)&pred_y, batch_size * sizeof(int)));
checkCudaErrors(hipMalloc((void **)&loss, batch_size * sizeof(float)));
checkCudaErrors(hipMalloc(&one_vec, batch_size * data_type_size));
if (this->data_type == CUDNN_DATA_FLOAT)
hipLaunchKernelGGL(( fillValue<float>), dim3(ceil(1.0 * batch_size / BW)), dim3(BW), 0, 0, (float *)one_vec, batch_size, 1);
else
hipLaunchKernelGGL(( fillValue<double>), dim3(ceil(1.0 * batch_size / BW)), dim3(BW), 0, 0, (double *)one_vec, batch_size, 1);
checkCudaErrors(hipHostMalloc((void **)&h_loss, batch_size * sizeof(float)));
checkCudaErrors(hipHostMalloc((void **)&h_pred_y, batch_size * sizeof(int)));
// do not allocate workspace initially
// allocate space for workspace and also keep track of algo
// size_t cur_workspace_size;
// workspace_size = 0;
// for (int i = 0; i < num_layers; i++) {
// if (layers[i].type == CONV) {
// ((ConvLayerParams *)params[i])->getWorkspaceSize(cur_workspace_size, free_bytes);
// if (cur_workspace_size > workspace_size)
// workspace_size = cur_workspace_size;
// }
// }
// checkCudaErrors(hipMalloc(&workspace, workspace_size));
// free_bytes = free_bytes - workspace_size;
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipMemGetInfo(&free_bytes, &total_bytes));
// note: no headroom is reserved here; simulateCNMEMMemory later reserves 100 MB before the CNMEM trial runs
std::cout << "Free bytes: " << free_bytes << std::endl;
// ---------------------- vDNN start ----------------------
size_t exp_max_consume, max_consume;
vDNNOptimize(exp_max_consume, max_consume);
std::cout << "actual_max_consume: " << max_consume << std::endl;
std::cout << "exp_max_consume: " << exp_max_consume << std::endl;
std::cout << "diff_max_consume(MB): " << (max_consume - exp_max_consume) / (1.0 * 1024 * 1024) << std::endl;
std::cout << "exp_free_bytes(MB): " << (free_bytes - exp_max_consume) / (1.0 * 1024 * 1024) << std::endl;
std::cout << "exp_total_consume(MB): " << (init_free_bytes - (free_bytes - exp_max_consume)) / (1.0 * 1024 * 1024) << std::endl;
std::cout << "actual_total_consume(MB): " << (init_free_bytes - (free_bytes - max_consume)) / (1.0 * 1024 * 1024) << std::endl;
// ---------------------- vDNN end ------------------------
// ---------------------- vDNN start ----------------------
free_bytes = max_consume;
cnmemDevice_t cnmem_device;
size_t cnmem_stream_memory_size = free_bytes;
cnmem_device.device = 0;
cnmem_device.size = cnmem_stream_memory_size;
cnmem_device.numStreams = 0;
cnmem_device.streams = NULL;
cnmem_device.streamSizes = NULL;
// do not allow call to hipMalloc
checkCNMEM(cnmemInit(1, &cnmem_device, CNMEM_FLAGS_CANNOT_GROW));
// ---------------------- vDNN end ------------------------
// ---------------------- vDNN start ----------------------
for (int i = 0; i < num_layers; i++) {
std::cerr << "to_offload[i] " << to_offload[i] << std::endl;
}
for (int i = 0; i < num_layers; i++) {
// allocate pinned memory in host
if (to_offload[i])
checkCudaErrors(hipHostMalloc(&h_layer_input[i], layer_input_size[i] * data_type_size));
}
// ---------------------- vDNN end ------------------------
checkCudaErrors(hipDeviceSynchronize());
size_t temp_free_bytes;
checkCudaErrors(hipMemGetInfo(&temp_free_bytes, &total_bytes));
std::cout << "Free bytes just before end of NeuralNet: " << temp_free_bytes << std::endl;
// {
// int n;
// std::cout << "waiting..\n";
// std::cin >> n;
// }
// timing events for the compute and memory-transfer streams
checkCudaErrors(hipEventCreate(&start_compute));
checkCudaErrors(hipEventCreate(&stop_compute));
checkCudaErrors(hipEventCreate(&start_transfer));
checkCudaErrors(hipEventCreate(&stop_transfer));
}
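// Dry-run of one training iteration using a CnmemSpace tracker: walk the forward
// and backward passes, subtracting each buffer/workspace size where it would be
// allocated and adding it back where it would be freed, while recording the peak
// in max_consume. Inputs marked in to_offload[] count as freed during the forward
// pass and re-allocated when prefetched during the backward pass. Returns false as
// soon as the tracked usage exceeds free_bytes; otherwise the estimate is
// cross-checked against an actual CNMEM replay in simulateCNMEMMemory().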
bool NeuralNet::simulateNeuralNetworkMemory(vDNNConvAlgoPref algo_pref, bool hard, size_t &exp_max_consume, size_t &max_consume) {
CnmemSpace space_tracker(free_bytes);
max_consume = 0;
// forward pass
// allocate space for 1st input
std::cerr << "Initial Used space(MB): " << space_tracker.getConsumed() << std::endl;
space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[0] * data_type_size);
space_tracker.updateMaxConsume(max_consume);
std::cerr << "Used space after allocating input(MB): " << space_tracker.getConsumed() << std::endl;
std::cerr << "Forward pass" << std::endl;
for (int i = 0; i < num_layers; i++) {
if (layer_type[i] == SOFTMAX)
break;
std::cerr << "Processing layer " << i << std::endl;
std::cerr << "Initial Used space(MB): " << space_tracker.getConsumed() << std::endl;
space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[i + 1] * data_type_size);
std::cerr << "Used space after output allocation(MB): " << space_tracker.getConsumed() << std::endl;
space_tracker.updateMaxConsume(max_consume);
if (layer_type[i] == CONV) {
ConvLayerParams *cur_params = (ConvLayerParams *)params[i];
size_t cur_workspace_size;
checkWORKSPACE(cur_params->getWorkspaceSize(space_tracker.free_bytes, ConvLayerParams::FWD, algo_pref, hard, cur_workspace_size));
space_tracker.updateSpace(CnmemSpace::SUB, cur_workspace_size);
space_tracker.updateMaxConsume(max_consume);
if (!space_tracker.isAvailable())
return false;
std::cerr << "Used space after workspace allocation(MB): " << space_tracker.getConsumed() << std::endl;
// current layer computation over, deallocate workspace
space_tracker.updateSpace(CnmemSpace::ADD, cur_workspace_size);
std::cerr << "Used space after workspace deallocation(MB): " << space_tracker.getConsumed() << std::endl;
}
if (!space_tracker.isAvailable())
return false;
// deallocate layer input
if (to_offload[i]) {
std::cerr << "deallocating input to " << i << std::endl;
space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[i] * data_type_size);
std::cerr << "Used space after deallocating input(MB): " << space_tracker.getConsumed() << std::endl;
}
}
std::cerr << "Backward pass" << std::endl;
if (batch_size * num_classes * data_type_size != layer_input_size[num_layers] * data_type_size) {
std::cout << "Panic!! Using wrong size\n";
exit(0);
}
// backward pass
space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[num_layers] * data_type_size);
std::cerr << "Used space after allocating final derivative(MB): " << space_tracker.getConsumed() << std::endl;
space_tracker.updateMaxConsume(max_consume);
// std::cerr << "max_consume: " << max_consume << std::endl;
for (int i = num_layers - 1; i >= 0; i--) {
// allocate space for previous layer derivative
std::cerr << "Processing layer " << i << std::endl;
std::cerr << "Used space initial(MB): " << space_tracker.getConsumed() << std::endl;
if (i > 0) {
if (layer_type[i] == SOFTMAX)
continue;
else {
space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[i] * data_type_size);
std::cerr << "Used space after allocating prev. derivative(MB): " << space_tracker.getConsumed() << std::endl;
space_tracker.updateMaxConsume(max_consume);
}
// std::cerr << "max_consume: " << max_consume << std::endl;
}
int layer_to_prefetch = findPrefetchLayer(i);
// if layer to be prefetched, allocate space for that layer
if (layer_to_prefetch != -1) {
std::cerr << "Prefetch layer " << layer_to_prefetch << std::endl;
space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[layer_to_prefetch] * data_type_size);
std::cerr << "Used space after allocating prefetch(MB): " << space_tracker.getConsumed() << std::endl;
space_tracker.updateMaxConsume(max_consume);
}
if (layer_type[i] == CONV) {
ConvLayerParams *cur_params = (ConvLayerParams *)params[i];
size_t cur_filter_workspace_size;
checkWORKSPACE(cur_params->getWorkspaceSize(space_tracker.free_bytes, ConvLayerParams::BWD_FILTER, algo_pref, hard, cur_filter_workspace_size));
size_t cur_data_workspace_size = 0;
if (i > 0)
checkWORKSPACE(cur_params->getWorkspaceSize(space_tracker.free_bytes, ConvLayerParams::BWD_DATA, algo_pref, hard, cur_data_workspace_size));
size_t cur_workspace_size = (cur_filter_workspace_size > cur_data_workspace_size) ? cur_filter_workspace_size : cur_data_workspace_size;
space_tracker.updateSpace(CnmemSpace::SUB, cur_workspace_size);
std::cerr << "Used space after allocating workspace(MB): " << space_tracker.getConsumed() << std::endl;
space_tracker.updateMaxConsume(max_consume);
if (!pre_alloc_conv_derivative) {
space_tracker.updateSpace(CnmemSpace::SUB, cur_params->kernel_size * data_type_size);
space_tracker.updateSpace(CnmemSpace::SUB, cur_params->C_out * data_type_size);
space_tracker.updateMaxConsume(max_consume);
std::cerr << "Used space after allocating weight derv.(MB): " << space_tracker.getConsumed() << std::endl;
}
// std::cerr << "max_consume: " << max_consume << std::endl;
if (!space_tracker.isAvailable())
return false;
// current layer computation over, deallocate workspace
space_tracker.updateSpace(CnmemSpace::ADD, cur_workspace_size);
std::cerr << "Used space after deallocating workspace(MB): " << space_tracker.getConsumed() << std::endl;
if (!pre_alloc_conv_derivative) {
space_tracker.updateSpace(CnmemSpace::ADD, cur_params->kernel_size * data_type_size);
space_tracker.updateSpace(CnmemSpace::ADD, cur_params->C_out * data_type_size);
std::cerr << "Used space after deallocating weight derv.(MB): " << space_tracker.getConsumed() << std::endl;
}
}
else if (layer_type[i] == FULLY_CONNECTED) {
FCLayerParams *cur_params = (FCLayerParams *)params[i];
if (!pre_alloc_fc_derivative) {
space_tracker.updateSpace(CnmemSpace::SUB, cur_params->weight_matrix_size * data_type_size);
space_tracker.updateSpace(CnmemSpace::SUB, cur_params->C_out * data_type_size);
space_tracker.updateMaxConsume(max_consume);
std::cerr << "Used space after allocating weight derv.(MB): " << space_tracker.getConsumed() << std::endl;
}
if (!space_tracker.isAvailable())
return false;
if (!pre_alloc_fc_derivative) {
space_tracker.updateSpace(CnmemSpace::ADD, cur_params->weight_matrix_size * data_type_size);
space_tracker.updateSpace(CnmemSpace::ADD, cur_params->C_out * data_type_size);
std::cerr << "Used space after deallocating weight derv.(MB): " << space_tracker.getConsumed() << std::endl;
}
}
else if (layer_type[i] == BATCHNORM) {
BatchNormLayerParams *cur_params = (BatchNormLayerParams *)params[i];
if (!pre_alloc_batch_norm_derivative) {
space_tracker.updateSpace(CnmemSpace::SUB, cur_params->allocation_size * data_type_size);
space_tracker.updateSpace(CnmemSpace::SUB, cur_params->allocation_size * data_type_size);
space_tracker.updateMaxConsume(max_consume);
std::cerr << "Used space after allocating weight derv.(MB): " << space_tracker.getConsumed() << std::endl;
}
if (!space_tracker.isAvailable())
return false;
if (!pre_alloc_batch_norm_derivative) {
space_tracker.updateSpace(CnmemSpace::ADD, cur_params->allocation_size * data_type_size);
space_tracker.updateSpace(CnmemSpace::ADD, cur_params->allocation_size * data_type_size);
std::cerr << "Used space after deallocating weight derv.(MB): " << space_tracker.getConsumed() << std::endl;
}
}
if (!space_tracker.isAvailable())
return false;
// deallocate layer output and derivative
space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[i + 1] * data_type_size);
space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[i + 1] * data_type_size);
std::cerr << "Used space after deallocating output, derivative(MB): " << space_tracker.getConsumed() << std::endl;
// if 1st layer, deallocate input layer also
if (i == 0) {
space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[i] * data_type_size);
std::cerr << "Used space after deallocating input(MB): " << space_tracker.getConsumed() << std::endl;
}
}
if (space_tracker.getConsumed() > 0)
std::cerr << "Panic!! more free bytes\n";
if (space_tracker.getConsumed() != 0)
std::cerr << "Panic!! bytes not freed properly\n";
// return true;
exp_max_consume = max_consume;
// check with cnmem once
bool ret_val = simulateCNMEMMemory(max_consume);
return ret_val;
}
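// Replays the allocation/free sequence of one iteration against a real CNMEM pool
// of size max_consume, keeping 100 MB of device headroom. When an allocation
// fails, the checkCNMEMSim* macros (defined elsewhere) flag out_of_memory and grow
// max_consume; the pool is then finalized and the replay restarts, until the
// sequence fits or max_consume exceeds the available free bytes. Memory-state
// snapshots are written to a cnmem_*.dat file named after the vDNN configuration.
// Returns true iff the final max_consume fits on the device.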
bool NeuralNet::simulateCNMEMMemory(size_t &max_consume) {
size_t init_max_consume = max_consume;
cnmemDevice_t cnmem_device;
size_t t;
checkCudaErrors(hipMemGetInfo(&free_bytes, &t));
std::cout << "free_bytes: " << free_bytes << std::endl;
free_bytes -= 100 * 1024 * 1024;
cnmem_device.device = 0;
cnmem_device.numStreams = 0;
cnmem_device.streams = NULL;
cnmem_device.streamSizes = NULL;
std::string cnmem_memory_state_filename;
if (vdnn_type == vDNN_ALL) {
if (vdnn_conv_algo == vDNN_PERFORMANCE_OPTIMAL) {
cnmem_memory_state_filename = "cnmem_all_p.dat";
}
else if (vdnn_conv_algo == vDNN_MEMORY_OPTIMAL) {
cnmem_memory_state_filename = "cnmem_all_m.dat";
}
}
else if (vdnn_type == vDNN_CONV) {
if (vdnn_conv_algo == vDNN_PERFORMANCE_OPTIMAL) {
cnmem_memory_state_filename = "cnmem_conv_p.dat";
}
else if (vdnn_conv_algo == vDNN_MEMORY_OPTIMAL) {
cnmem_memory_state_filename = "cnmem_conv_m.dat";
}
}
else if (vdnn_type == vDNN_DYN) {
cnmem_memory_state_filename = "cnmem_dyn.dat";
}
else {
cnmem_memory_state_filename = "cnmem_unknown.dat";
}
FILE *cnmem_memory_state_fptr = fopen(cnmem_memory_state_filename.c_str(), "w");
size_t run_count = 0;
bool out_of_memory = false;
while (true) {
run_count++;
if (max_consume >= free_bytes)
break;
out_of_memory = false;
cnmem_device.size = max_consume;
std::cerr << run_count << ' ' << max_consume << std::endl;
if (max_consume > free_bytes)
std::cerr << "panic!! max_consume > free_bytes\n";
checkCNMEM(cnmemInit(1, &cnmem_device, CNMEM_FLAGS_CANNOT_GROW));
resetPrefetched();
fprintf(cnmem_memory_state_fptr, "//////////////////////////////////////////////////////////////////\n");
fprintf(cnmem_memory_state_fptr, "run_count: %lu\n", run_count);
fprintf(cnmem_memory_state_fptr, "max_consume: %lu\n", max_consume);
fprintf(cnmem_memory_state_fptr, "//////////////////////////////////////////////////////////////////\n");
fprintf(cnmem_memory_state_fptr, "initial state\n");
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
if (to_offload[0]) {
checkCNMEMSim(cnmemMalloc(&layer_input[0], layer_input_size[0] * data_type_size, NULL),
layer_input_size[0] * data_type_size, max_consume, free_bytes, checkCNMEM(cnmemFinalize()); continue, out_of_memory);
}
else {
checkCNMEMSim(cnmemMallocRight(&layer_input[0], layer_input_size[0] * data_type_size, NULL),
layer_input_size[0] * data_type_size, max_consume, free_bytes, checkCNMEM(cnmemFinalize()); continue, out_of_memory);
}
fprintf(cnmem_memory_state_fptr, "after alloc. layer_input[%d] - size: %lu\n", 0, layer_input_size[0] * data_type_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
// forward propagate
for (int i = 0; i < num_layers; i++) {
size_t cur_workspace_size;
void *cur_workspace;
if (to_offload[i + 1]) {
checkCNMEMSim(cnmemMalloc(&layer_input[i + 1], layer_input_size[i + 1] * data_type_size, NULL),
layer_input_size[i + 1] * data_type_size, max_consume, free_bytes, break, out_of_memory);
}
else {
checkCNMEMSimRight(cnmemMallocRight(&layer_input[i + 1], layer_input_size[i + 1] * data_type_size, NULL),
layer_input_size[i + 1] * data_type_size, max_consume, free_bytes, break, out_of_memory);
}
fprintf(cnmem_memory_state_fptr, "after alloc. layer_input[%d] - size: %lu\n", i + 1, layer_input_size[i + 1] * data_type_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
if (layer_type[i] == CONV) {
// std::cout << "conv\n";
ConvLayerParams *cur_params = (ConvLayerParams *)params[i];
cur_workspace_size = cur_params->fwd_workspace_size;
checkCNMEMSimRight(cnmemMalloc(&cur_workspace, cur_workspace_size, NULL),
cur_workspace_size, max_consume, free_bytes, break, out_of_memory);
fprintf(cnmem_memory_state_fptr, "after alloc. conv. workspace - size: %lu\n", cur_workspace_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
}
if (layer_type[i] == CONV) {
checkCNMEMSim(cnmemFree(cur_workspace, NULL),
cur_workspace_size, max_consume, free_bytes, break, out_of_memory);
fprintf(cnmem_memory_state_fptr, "after free conv. workspace - size: %lu\n", cur_workspace_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
}
if (to_offload[i]) {
checkCNMEMSim(cnmemFree(layer_input[i], NULL),
layer_input_size[i] * data_type_size, max_consume, free_bytes, break, out_of_memory);
fprintf(cnmem_memory_state_fptr, "after free layer_input[%d] - size: %lu\n", i, layer_input_size[i] * data_type_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
}
if (layer_type[i + 1] == ACTV or layer_type[i + 1] == SOFTMAX) {
i = i + 1;
}
}
if (out_of_memory) {
checkCNMEM(cnmemFinalize());
if (max_consume < free_bytes)
continue;
else
break;
}
checkCNMEMSim(cnmemMalloc(&dlayer_input[num_layers], batch_size * num_classes * data_type_size, NULL),
layer_input_size[num_layers] * data_type_size, max_consume, free_bytes, checkCNMEM(cnmemFinalize()); continue, out_of_memory);
fprintf(cnmem_memory_state_fptr, "after alloc. dlayer_input[%d] - size: %lu\n", num_layers, layer_input_size[num_layers] * data_type_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
for (int i = num_layers - 1; i >= 0; i--) {
// ---------------------- vDNN start ----------------------
size_t cur_filter_workspace_size, cur_data_workspace_size, cur_workspace_size;
void *cur_workspace;
if (i > 0) {
if (layer_type[i] == ACTV or layer_type[i] == SOFTMAX) {
dlayer_input[i] = dlayer_input[i + 1];
}
else {
int layer_to_prefetch = findPrefetchLayer(i);
if (layer_to_prefetch != -1) {
checkCNMEMSim(cnmemMalloc(&layer_input[layer_to_prefetch], layer_input_size[layer_to_prefetch] * data_type_size, NULL),
layer_input_size[layer_to_prefetch] * data_type_size, max_consume, free_bytes, break, out_of_memory);
fprintf(cnmem_memory_state_fptr, "after alloc. prefetch layer_input[%d] - size: %lu\n", layer_to_prefetch, layer_input_size[layer_to_prefetch] * data_type_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
}
checkCNMEMSim(cnmemMalloc(&dlayer_input[i], layer_input_size[i] * data_type_size, NULL),
layer_input_size[i] * data_type_size, max_consume, free_bytes, break, out_of_memory);
fprintf(cnmem_memory_state_fptr, "after alloc. dlayer_input[%d] - size: %lu\n", i, layer_input_size[i] * data_type_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
}
}
if (layer_type[i] == CONV) {
// std::cout << "here\n";
ConvLayerParams *cur_params = (ConvLayerParams *)params[i];
// allocate space for derivative
if (!pre_alloc_conv_derivative) {
if (!cur_params->cnmemAllocDerivativesCheck(data_type_size, NULL, max_consume, free_bytes, out_of_memory))
break;
fprintf(cnmem_memory_state_fptr, "after alloc. dW - size: %lu\n", cur_params->kernel_size * data_type_size);
fprintf(cnmem_memory_state_fptr, "after alloc. db - size: %lu\n", cur_params->C_out * data_type_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
}
cur_filter_workspace_size = cur_params->bwd_filter_workspace_size;
if (i > 0)
cur_data_workspace_size = cur_params->bwd_data_workspace_size;
else
cur_data_workspace_size = 0;
cur_workspace_size = (cur_filter_workspace_size > cur_data_workspace_size) ? cur_filter_workspace_size : cur_data_workspace_size;
checkCNMEMSim(cnmemMalloc(&cur_workspace, cur_workspace_size, NULL),
cur_workspace_size, max_consume, free_bytes, break, out_of_memory);
fprintf(cnmem_memory_state_fptr, "after alloc. conv. workspace - size: %lu\n", cur_workspace_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
}
else if (layer_type[i] == FULLY_CONNECTED) {
FCLayerParams *cur_params = (FCLayerParams *)params[i];
if (!pre_alloc_fc_derivative) {
if (!cur_params->cnmemAllocDerivativesCheck(data_type_size, NULL, max_consume, free_bytes, out_of_memory))
break;
fprintf(cnmem_memory_state_fptr, "after alloc. dW - size: %lu\n", cur_params->weight_matrix_size * data_type_size);
fprintf(cnmem_memory_state_fptr, "after alloc. db - size: %lu\n", cur_params->C_out * data_type_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
}
}
else if (layer_type[i] == BATCHNORM) {
BatchNormLayerParams *cur_params = (BatchNormLayerParams *)params[i];
if (!pre_alloc_batch_norm_derivative) {
if (!cur_params->cnmemAllocDerivativesCheck(data_type_size, NULL, max_consume, free_bytes, out_of_memory))
break;
fprintf(cnmem_memory_state_fptr, "after alloc. dscale - size: %lu\n", cur_params->allocation_size * data_type_size);
fprintf(cnmem_memory_state_fptr, "after alloc. dbias - size: %lu\n", cur_params->allocation_size * data_type_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
}
}
else if (layer_type[i] == SOFTMAX) {
// std::cout << "compute here\n";
SoftmaxLayerParams *cur_params = (SoftmaxLayerParams *)params[i];
continue;
}
if (layer_type[i] == CONV) {
checkCNMEMSim(cnmemFree(cur_workspace, NULL),
cur_workspace_size, max_consume, free_bytes, break, out_of_memory);
fprintf(cnmem_memory_state_fptr, "after free conv. workspace - size: %lu\n", cur_workspace_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
if (!pre_alloc_conv_derivative) {
ConvLayerParams *cur_params = (ConvLayerParams *)params[i];
cur_params->cnmemFreeDerivatives(NULL);
fprintf(cnmem_memory_state_fptr, "after free dP - size: %lu\n", (long unsigned)0);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
}
}
else if (layer_type[i] == FULLY_CONNECTED) {
if (!pre_alloc_fc_derivative) {
FCLayerParams *cur_params = (FCLayerParams *)params[i];
cur_params->cnmemFreeDerivatives(NULL);
fprintf(cnmem_memory_state_fptr, "after free dP - size: %lu\n", (long unsigned)0);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
}
}
else if (layer_type[i] == BATCHNORM) {
if (!pre_alloc_batch_norm_derivative) {
BatchNormLayerParams *cur_params = (BatchNormLayerParams *)params[i];
cur_params->cnmemFreeDerivatives(NULL);
fprintf(cnmem_memory_state_fptr, "after free dP - size: %lu\n", (long unsigned)0);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
}
}
checkCNMEMSim(cnmemFree(layer_input[i + 1], NULL),
layer_input_size[i + 1] * data_type_size, max_consume, free_bytes, break, out_of_memory);
fprintf(cnmem_memory_state_fptr, "after free layer_input[%d] - size: %lu\n", i + 1, layer_input_size[i + 1] * data_type_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
checkCNMEMSim(cnmemFree(dlayer_input[i + 1], NULL),
layer_input_size[i + 1] * data_type_size, max_consume, free_bytes, break, out_of_memory);
fprintf(cnmem_memory_state_fptr, "after free dlayer_input[%d] - size: %lu\n", i + 1, layer_input_size[i + 1] * data_type_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
if (i == 0) {
checkCNMEMSim(cnmemFree(layer_input[i], NULL),
layer_input_size[i] * data_type_size, max_consume, free_bytes, break, out_of_memory);
fprintf(cnmem_memory_state_fptr, "after free layer_input[%d] - size: %lu\n", i, layer_input_size[i] * data_type_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
}
}
checkCNMEM(cnmemFinalize());
if (out_of_memory) {
if (max_consume < free_bytes)
continue;
else
break;
}
break;
}
free_bytes += 100 * 1024 * 1024;
if (max_consume < free_bytes) {
double exp_size = (init_max_consume + init_free_bytes - free_bytes) / (1.0 * 1024 * 1024);
double act_size = (max_consume + init_free_bytes - free_bytes) / (1.0 * 1024 * 1024);
fprintf(cnmem_memory_state_fptr, "expected_memory_consume: %f MB\n", exp_size);
fprintf(cnmem_memory_state_fptr, "actual_memory_consume: %f MB\n", act_size);
}
else {
fprintf(cnmem_memory_state_fptr, "out of memory\n");
}
fclose(cnmem_memory_state_fptr);
if (max_consume < free_bytes)
return true;
else
return false;
}
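// Picks the offload set and convolution-algorithm preference. For vDNN_ALL,
// vDNN_CONV and vDNN_NONE the configured policy is simulated with a hard algo
// preference and outOfMemory() is called if it does not fit. For vDNN_DYN, after a
// trainability check (offload all, memory-optimal algos), configurations are tried
// from fastest to most memory-frugal: no/conv/all offload with performance-optimal
// algos (hard), then conv/all offload with a soft preference, then conv/all offload
// with memory-optimal algos; the first configuration that fits is kept, and the
// program exits if none does.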
void NeuralNet::vDNNOptimize(size_t &exp_max_consume, size_t &max_consume) {
bool hard = true, soft = false;
// if type is vDNN_ALL or vDNN_CONV, check if sufficient space is available
if (vdnn_type == vDNN_ALL) {
setOffload(OFFLOAD_ALL);
resetPrefetched();
if (vdnn_conv_algo == vDNN_PERFORMANCE_OPTIMAL) {
if (!simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, hard, exp_max_consume, max_consume))
outOfMemory();
}
else if (vdnn_conv_algo == vDNN_MEMORY_OPTIMAL) {
if (!simulateNeuralNetworkMemory(PREFER_MEMORY_OPTIMAL, hard, exp_max_consume, max_consume))
outOfMemory();
}
return;
}
else if (vdnn_type == vDNN_CONV) {
setOffload(OFFLOAD_CONV);
resetPrefetched();
if (vdnn_conv_algo == vDNN_PERFORMANCE_OPTIMAL) {
if (!simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, hard, exp_max_consume, max_consume))
outOfMemory();
}
else if (vdnn_conv_algo == vDNN_MEMORY_OPTIMAL) {
if (!simulateNeuralNetworkMemory(PREFER_MEMORY_OPTIMAL, hard, exp_max_consume, max_consume))
outOfMemory();
}
return;
}
else if (vdnn_type == vDNN_NONE) {
setOffload(OFFLOAD_NONE);
resetPrefetched();
if (vdnn_conv_algo == vDNN_PERFORMANCE_OPTIMAL) {
if (!simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, hard, exp_max_consume, max_consume))
outOfMemory();
}
else if (vdnn_conv_algo == vDNN_MEMORY_OPTIMAL) {
if (!simulateNeuralNetworkMemory(PREFER_MEMORY_OPTIMAL, hard, exp_max_consume, max_consume))
outOfMemory();
}
return;
}
if (vdnn_type == vDNN_DYN) {
// check for trainability
std::cerr << "vDNN_DYN\n";
setOffload(NeuralNet::OFFLOAD_ALL);
resetPrefetched();
if (!simulateNeuralNetworkMemory(PREFER_MEMORY_OPTIMAL, hard, exp_max_consume, max_consume))
outOfMemory();
// check whether the fastest algos work with no offload; if so, select that and return
setOffload(NeuralNet::OFFLOAD_NONE);
resetPrefetched();
if (simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, hard, exp_max_consume, max_consume)) {
std::cerr << "Choosing PERF_OPT, NO OFFLOAD\n";
return;
}
// otherwise try offloading only conv inputs with the fastest algos, then offloading all layers with the fastest algos
setOffload(NeuralNet::OFFLOAD_CONV);
resetPrefetched();
if (simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, hard, exp_max_consume, max_consume)) {
std::cerr << "Choosing PERF_OPT, CONV OFFLOAD\n";
return;
}
setOffload(NeuralNet::OFFLOAD_ALL);
resetPrefetched();
if (simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, hard, exp_max_consume, max_consume)) {
std::cerr << "Choosing PERF_OPT, ALL OFFLOAD\n";
return;
}
// greedy fallback: retry conv/all offload with a soft algorithm preference (performance-optimal where it fits)
setOffload(NeuralNet::OFFLOAD_CONV);
resetPrefetched();
if (simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, soft, exp_max_consume, max_consume)) {
std::cerr << "Choosing GREEDY, CONV OFFLOAD\n";
return;
}
setOffload(NeuralNet::OFFLOAD_ALL);
resetPrefetched();
if (simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, soft, exp_max_consume, max_consume)) {
std::cerr << "Choosing GREEDY, ALL OFFLOAD\n";
return;
}
setOffload(NeuralNet::OFFLOAD_CONV);
resetPrefetched();
if (simulateNeuralNetworkMemory(PREFER_MEMORY_OPTIMAL, hard, exp_max_consume, max_consume)) {
std::cerr << "Choosing MEM_OPT, CONV OFFLOAD\n";
return;
}
setOffload(NeuralNet::OFFLOAD_ALL);
resetPrefetched();
if (simulateNeuralNetworkMemory(PREFER_MEMORY_OPTIMAL, hard, exp_max_consume, max_consume)) {
std::cerr << "Choosing MEM_OPT, ALL OFFLOAD\n";
return;
}
}
exit(0);
}
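// Marks which layer inputs are offloaded to pinned host memory during the forward
// pass: none, only CONV inputs, or all layers except ACTV and SOFTMAX. In every
// mode the last non-ACTV/SOFTMAX layer is always kept on the device.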
void NeuralNet::setOffload(NeuralNet::OffloadType offload_type) {
if (offload_type == OFFLOAD_NONE) {
for (int i = 0; i < num_layers; i++)
to_offload[i] = false;
}
else if (offload_type == OFFLOAD_CONV) {
for (int i = 0; i < num_layers; i++) {
if (layer_type[i] == CONV)
to_offload[i] = true;
else
to_offload[i] = false;
}
// set last non SOFTMAX/ACTV layer to no_offload
for (int i = num_layers - 1; i >= 0; i--) {
if (layer_type[i] == SOFTMAX or layer_type[i] == ACTV)
;
else {
to_offload[i] = false;
break;
}
}
}
else if (offload_type == OFFLOAD_ALL) {
for (int i = 0; i < num_layers; i++) {
if (layer_type[i] == ACTV or layer_type[i] == SOFTMAX)
to_offload[i] = false;
else
to_offload[i] = true;
}
// set last non SOFTMAX/ACTV layer to no_offload
for (int i = num_layers - 1; i >= 0; i--) {
if (layer_type[i] == SOFTMAX or layer_type[i] == ACTV)
;
else {
to_offload[i] = false;
break;
}
}
}
}
void NeuralNet::resetPrefetched() {
for (int i = 0; i < num_layers; i++)
prefetched[i] = false;
}
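// Convenience overload that discards the per-layer vDNN lag measurements.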
void NeuralNet::getLoss(void *X, int *y, double learning_rate, bool train, int *correct_count, float *loss) {
std::vector<float> t1, t2;
this->getLoss(X, y, learning_rate, t1, t2, train, correct_count, loss);
}
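// One iteration over a batch. Layer buffers are cnmemMalloc'd on demand and freed
// as soon as they are no longer needed. During the forward pass, inputs of layers
// marked in to_offload[] are copied to pinned host memory on stream_memory while
// computation proceeds on stream_compute; during the backward pass they are
// prefetched back (see findPrefetchLayer). The time spent waiting on stream_memory
// after each layer is recorded in fwd_vdnn_lag / bwd_vdnn_lag. With train == false,
// only the forward pass (without the final softmax) runs and correct_count is
// filled; with train == true, the backward pass also updates the parameters and
// *scalar_loss is set.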
void NeuralNet::getLoss(void *X, int *y, double learning_rate, std::vector<float> &fwd_vdnn_lag, std::vector<float> &bwd_vdnn_lag, bool train, int *correct_count, float *scalar_loss) {
CnmemSpace space_tracker(free_bytes);
// std::cout << "here\n";
// std::cout << "Free bytes: " << free_bytes << std::endl;
for (int i = 0; i < num_layers; i++)
prefetched[i] = false;
checkCNMEM(cnmemMalloc(&layer_input[0], layer_input_size[0] * data_type_size, NULL));
space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[0] * data_type_size);
checkCudaErrors(hipMemcpy(layer_input[0], X, batch_size * input_channels * input_h * input_w * data_type_size, hipMemcpyHostToDevice));
if (train == true) {
checkCudaErrors(hipMemcpy(this->y, y, batch_size * sizeof(int), hipMemcpyHostToDevice)); // labels are int, so copy sizeof(int) per sample, not data_type_size
}
float alpha = 1.0, beta = 0.0;
float Salpha = 1.0, Sbeta = 0.0;
double Dalpha = 1.0, Dbeta = 0.0;
// forward propagate
for (int i = 0; i < num_layers; i++) {
if (train == false && i == num_layers - 1)
break;
// ---------------------- vDNN start ----------------------
size_t cur_workspace_size;
void *cur_workspace;
// offload if required
if (i > 0 && to_offload[i] && train == true)
checkCudaErrors(hipMemcpyAsync(h_layer_input[i], layer_input[i],
layer_input_size[i] * data_type_size, hipMemcpyDeviceToHost, stream_memory));
checkCNMEM(cnmemMalloc(&layer_input[i + 1], layer_input_size[i + 1] * data_type_size, NULL));
space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[i + 1] * data_type_size);
// std::cout << "Free bytes: " << free_bytes << std::endl;
// ---------------------- vDNN end ------------------------
// std::cout << "here" << i << std::endl;
if (layer_type[i] == CONV) {
// std::cout << "conv\n";
ConvLayerParams *cur_params = (ConvLayerParams *)params[i];
cur_workspace_size = cur_params->fwd_workspace_size;
checkCNMEM(cnmemMalloc(&cur_workspace, cur_workspace_size, NULL));
// computation
checkCUDNN(cudnnConvolutionForward(cudnn_handle, &alpha,
cur_params->input_tensor, layer_input[i],
cur_params->filter_desc, cur_params->W,
cur_params->conv_desc, cur_params->fwd_algo,
cur_workspace, cur_workspace_size,
&beta,
cur_params->output_tensor, layer_input[i + 1]));
checkCUDNN(cudnnAddTensor(cudnn_handle, &alpha,
cur_params->bias_desc, cur_params->b,
&alpha,
cur_params->output_tensor, layer_input[i + 1]));
// if activation required
if (cur_params->activation_mode != ACTIVATION_NONE) {
checkCUDNN(cudnnActivationForward(cudnn_handle, cur_params->actv_desc,
&alpha,
cur_params->output_tensor, layer_input[i + 1],
&beta,
cur_params->output_tensor, layer_input[i + 1]));
}
space_tracker.updateSpace(CnmemSpace::SUB, cur_workspace_size);
// std::cout << "Free bytes: " << free_bytes << std::endl;
}
else if (layer_type[i] == FULLY_CONNECTED) {
// std::cout << "FC\n";
FCLayerParams *cur_params = (FCLayerParams *)params[i];
// std::cout << "FChere" << i << std::endl;
if (data_type == CUDNN_DATA_FLOAT) {
checkCUBLAS(hipblasSgemm(cublas_handle,
HIPBLAS_OP_N, HIPBLAS_OP_N,
cur_params->C_out, batch_size, cur_params->C_in,
&Salpha,
(float *)cur_params->W, cur_params->C_out,
(float *)layer_input[i], cur_params->C_in,
&Sbeta,
(float *)layer_input[i + 1], cur_params->C_out));
checkCUBLAS(hipblasSgemm(cublas_handle,
HIPBLAS_OP_N, HIPBLAS_OP_N,
cur_params->C_out, batch_size, 1,
&Salpha,
(float *)cur_params->b, cur_params->C_out,
(float *)one_vec, 1,
&Salpha,
(float *)layer_input[i + 1], cur_params->C_out));
}
else if (data_type == CUDNN_DATA_DOUBLE) {
checkCUBLAS(hipblasDgemm(cublas_handle,
HIPBLAS_OP_N, HIPBLAS_OP_N,
cur_params->C_out, batch_size, cur_params->C_in,
&Dalpha,
(double *)cur_params->W, cur_params->C_out,
(double *)layer_input[i], cur_params->C_in,
&Dbeta,
(double *)layer_input[i + 1], cur_params->C_out));
checkCUBLAS(hipblasDgemm(cublas_handle,
HIPBLAS_OP_N, HIPBLAS_OP_N,
cur_params->C_out, batch_size, 1,
&Dalpha,
(double *)cur_params->b, cur_params->C_out,
(double *)one_vec, 1,
&Dalpha,
(double *)layer_input[i + 1], cur_params->C_out));
}
if (cur_params->activation_mode != ACTIVATION_NONE) {
checkCUDNN(cudnnActivationForward(cudnn_handle, cur_params->actv_desc,
&alpha,
cur_params->output_tensor, layer_input[i + 1],
&beta,
cur_params->output_tensor, layer_input[i + 1]));
}
// std::cout << "FChere" << i << std::endl;
}
else if (layer_type[i] == DROPOUT) {
// std::cout << "Dropout\n";
DropoutLayerParams *cur_params = (DropoutLayerParams *)params[i];
checkCUDNN(cudnnDropoutForward(cudnn_handle, cur_params->dropout_desc,
cur_params->input_tensor, layer_input[i],
cur_params->input_tensor, layer_input[i + 1],
cur_params->reserved_space,
cur_params->reserved_space_size));
}
else if (layer_type[i] == BATCHNORM) {
// std::cout << "Batchnorm\n";
BatchNormLayerParams *cur_params = (BatchNormLayerParams *)params[i];
if (train == true) {
checkCUDNN(cudnnBatchNormalizationForwardTraining(cudnn_handle, cur_params->mode,
&alpha, &beta,
cur_params->input_tensor, layer_input[i],
cur_params->input_tensor, layer_input[i + 1],
cur_params->sbmv_desc,
cur_params->scale, cur_params->bias,
cur_params->factor,
cur_params->running_mean, cur_params->running_variance,
cur_params->epsilon,
cur_params->result_save_mean, cur_params->result_save_inv_var));
}
else {
checkCUDNN(cudnnBatchNormalizationForwardInference(cudnn_handle, cur_params->mode,
&alpha, &beta,
cur_params->input_tensor, layer_input[i],
cur_params->input_tensor, layer_input[i + 1],
cur_params->sbmv_desc,
cur_params->scale, cur_params->bias,
cur_params->running_mean, cur_params->running_variance,
cur_params->epsilon));
}
}
else if (layer_type[i] == POOLING) {
// std::cout << "Pooling\n";
PoolingLayerParams *cur_params = (PoolingLayerParams *)params[i];
checkCUDNN(cudnnPoolingForward(cudnn_handle, cur_params->pool_desc,
&alpha,
cur_params->input_tensor, layer_input[i],
&beta,
cur_params->output_tensor, layer_input[i + 1]));
}
else if (layer_type[i] == ACTV) {
// std::cout << "Actv\n";
std::cout << "Panic!! ACTV wrong place\n";
exit(0);
ActivationLayerParams *cur_params = (ActivationLayerParams *)params[i];
checkCUDNN(cudnnActivationForward(cudnn_handle, cur_params->actv_desc,
&alpha,
cur_params->input_tensor, layer_input[i],
&beta,
cur_params->input_tensor, layer_input[i + 1]));
}
else if (layer_type[i] == SOFTMAX) {
// std::cout << "Softmax\n";
std::cout << "Panic!! SOFTMAX wrong place\n";
exit(0);
if (train == true) {
SoftmaxLayerParams *cur_params = (SoftmaxLayerParams *)params[i];
checkCUDNN(cudnnSoftmaxForward(cudnn_handle, cur_params->algo, cur_params->mode,
&alpha,
cur_params->input_tensor, layer_input[i],
&beta,
cur_params->input_tensor, layer_input[i + 1]));
}
}
// ---------------------- vDNN start ----------------------
// synchronization
// checkCudaErrors(hipDeviceSynchronize());
// if next layer is ACTV or SOFTMAX, complete that and come to synchronization
// the case in above if for ACTV and SOFTMAX never occurs
if (layer_type[i + 1] == SOFTMAX) {
i++;
if (train == true) {
layer_input[i + 1] = layer_input[i];
SoftmaxLayerParams *cur_params = (SoftmaxLayerParams *)params[i];
checkCUDNN(cudnnSoftmaxForward(cudnn_handle, cur_params->algo, cur_params->mode,
&alpha,
cur_params->input_tensor, layer_input[i],
&beta,
cur_params->input_tensor, layer_input[i + 1]));
}
i--;
}
struct timespec start_time, end_time;
checkCudaErrors(hipStreamSynchronize(stream_compute));
if (train)
clock_gettime(CLOCK_MONOTONIC, &start_time);
checkCudaErrors(hipStreamSynchronize(stream_memory));
if (train) {
clock_gettime(CLOCK_MONOTONIC, &end_time);
float lag = (end_time.tv_sec - start_time.tv_sec) * 1e3 + (end_time.tv_nsec - start_time.tv_nsec) * 1e-6;
fwd_vdnn_lag.push_back(lag);
}
// std::cout << "EndSynchere" << i << std::endl;
if (layer_type[i] == CONV) {
checkCNMEM(cnmemFree(cur_workspace, NULL));
space_tracker.updateSpace(CnmemSpace::ADD, cur_workspace_size);
}
if (to_offload[i] && train == true) {
checkCNMEM(cnmemFree(layer_input[i], NULL));
space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[i] * data_type_size);
}
if (train == false) {
checkCNMEM(cnmemFree(layer_input[i], NULL));
space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[i] * data_type_size);
}
if (layer_type[i + 1] == ACTV or layer_type[i + 1] == SOFTMAX) {
i = i + 1;
}
// std::cout << "EndSynchere" << i << std::endl;
// ---------------------- vDNN end ------------------------
}
// std::cout << "here" << std::endl;
if (train == false) {
compareOutputCorrect(correct_count, y);
checkCNMEM(cnmemFree(layer_input[num_layers - 1], NULL));
space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[num_layers - 1] * data_type_size);
return;
}
*scalar_loss = computeLoss();
// ---------------------- vDNN start ----------------------
checkCNMEM(cnmemMalloc(&dlayer_input[num_layers], batch_size * num_classes * data_type_size, NULL));
space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[num_layers] * data_type_size);
// std::cout << "Free bytes: " << free_bytes << std::endl;
// ---------------------- vDNN end ------------------------
if (layer_type[num_layers - 1] == SOFTMAX) {
// SoftmaxLayerParams *cur_params = (SoftmaxLayerParams *)params[num_layers - 1];
if (data_type == CUDNN_DATA_FLOAT) {
checkCudaErrors(hipMemset(dlayer_input[num_layers], 0, batch_size * num_classes * sizeof(float)));
hipLaunchKernelGGL(( softmaxLossBackProp<float>), dim3(ceil(1.0 * batch_size / BW)), dim3(BW), 0, 0, this->y, (float *)layer_input[num_layers],
(float *)dlayer_input[num_layers], batch_size, num_classes, softmax_eps);
}
else if (data_type == CUDNN_DATA_DOUBLE) {
checkCudaErrors(hipMemset(dlayer_input[num_layers], 0, batch_size * num_classes * sizeof(double)));
hipLaunchKernelGGL(( softmaxLossBackProp<double>), dim3(ceil(1.0 * batch_size / BW)), dim3(BW), 0, 0, this->y, (double *)layer_input[num_layers],
(double *)dlayer_input[num_layers], batch_size, num_classes, softmax_eps);
}
}
for (int i = num_layers - 1; i >= 0; i--) {
// ---------------------- vDNN start ----------------------
size_t cur_filter_workspace_size, cur_data_workspace_size, cur_workspace_size;
void *cur_workspace;
if (i > 0) {
if (layer_type[i] == ACTV or layer_type[i] == SOFTMAX) {
dlayer_input[i] = dlayer_input[i + 1];
}
else {
int layer_to_prefetch = findPrefetchLayer(i);
if (layer_to_prefetch != -1) {
checkCNMEM(cnmemMalloc(&layer_input[layer_to_prefetch], layer_input_size[layer_to_prefetch] * data_type_size, NULL));
space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[layer_to_prefetch] * data_type_size);
// std::cout << "Free bytes: " << free_bytes << std::endl;
if (layer_to_prefetch != 0) {
checkCudaErrors(hipMemcpyAsync(layer_input[layer_to_prefetch], h_layer_input[layer_to_prefetch],
layer_input_size[layer_to_prefetch] * data_type_size, hipMemcpyHostToDevice, stream_memory));
}
else {
// std::cout << "transfer here\n";
checkCudaErrors(hipMemcpyAsync(layer_input[layer_to_prefetch], X,
layer_input_size[layer_to_prefetch] * data_type_size, hipMemcpyHostToDevice, stream_memory));
// std::cout << "transfer here\n";
}
}
checkCNMEM(cnmemMalloc(&dlayer_input[i], layer_input_size[i] * data_type_size, NULL));
space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[i] * data_type_size);
}
// std::cout << "Free bytes: " << free_bytes << std::endl;
}
// ---------------------- vDNN end ------------------------
if (layer_type[i] == CONV) {
// std::cout << "here\n";
ConvLayerParams *cur_params = (ConvLayerParams *)params[i];
if (cur_params->activation_mode != ACTIVATION_NONE) {
checkCUDNN(cudnnActivationBackward(cudnn_handle, cur_params->actv_desc, &alpha,
cur_params->output_tensor, layer_input[i + 1],
cur_params->output_tensor, dlayer_input[i + 1],
cur_params->output_tensor, layer_input[i + 1],
&beta,
cur_params->output_tensor, dlayer_input[i + 1]));
}
// allocate space for derivative
if (!pre_alloc_conv_derivative) {
cur_params->cnmemAllocDerivatives(data_type_size, NULL);
space_tracker.updateSpace(CnmemSpace::SUB, cur_params->kernel_size * data_type_size);
space_tracker.updateSpace(CnmemSpace::SUB, cur_params->C_out * data_type_size);
}
cur_filter_workspace_size = cur_params->bwd_filter_workspace_size;
if (i > 0)
cur_data_workspace_size = cur_params->bwd_data_workspace_size;
else
cur_data_workspace_size = 0;
// std::cout << "bwd cur_workspace_size: " << cur_workspace_size << std::endl;
cur_workspace_size = (cur_filter_workspace_size > cur_data_workspace_size) ? cur_filter_workspace_size : cur_data_workspace_size;
checkCNMEM(cnmemMalloc(&cur_workspace, cur_workspace_size, NULL));
checkCUDNN(cudnnConvolutionBackwardBias(cudnn_handle, &alpha,
cur_params->output_tensor, dlayer_input[i + 1],
&beta,
cur_params->bias_desc, cur_params->db));
// std::cout << "neural_net: backward conv i:" << i << std::endl;
checkCUDNN(cudnnConvolutionBackwardFilter(cudnn_handle, &alpha,
cur_params->input_tensor, layer_input[i],
cur_params->output_tensor, dlayer_input[i + 1],
cur_params->conv_desc, cur_params->bwd_filter_algo,
cur_workspace, cur_workspace_size,
&beta,
cur_params->filter_desc,
cur_params->dW));
if (i > 0)
checkCUDNN(cudnnConvolutionBackwardData(cudnn_handle, &alpha,
cur_params->filter_desc, cur_params->W,
cur_params->output_tensor, dlayer_input[i + 1],
cur_params->conv_desc, cur_params->bwd_data_algo,
cur_workspace, cur_workspace_size,
&beta,
cur_params->input_tensor, dlayer_input[i]));
space_tracker.updateSpace(CnmemSpace::SUB, cur_workspace_size);
// std::cout << "Free bytes: " << free_bytes << std::endl;
// std::cout << "here\n";
cur_params->stepParams(cublas_handle, learning_rate);
}
else if (layer_type[i] == FULLY_CONNECTED) {
FCLayerParams *cur_params = (FCLayerParams *)params[i];
if (cur_params->activation_mode != ACTIVATION_NONE) {
checkCUDNN(cudnnActivationBackward(cudnn_handle, cur_params->actv_desc, &alpha,
cur_params->output_tensor, layer_input[i + 1],
cur_params->output_tensor, dlayer_input[i + 1],
cur_params->output_tensor, layer_input[i + 1],
&beta,
cur_params->output_tensor, dlayer_input[i + 1]));
}
if (!pre_alloc_fc_derivative) {
cur_params->cnmemAllocDerivatives(data_type_size, NULL);
space_tracker.updateSpace(CnmemSpace::SUB, cur_params->weight_matrix_size * data_type_size);
space_tracker.updateSpace(CnmemSpace::SUB, cur_params->C_out * data_type_size);
}
if (data_type == CUDNN_DATA_FLOAT) {
// bias backward
checkCUBLAS(hipblasSgemm(cublas_handle,
HIPBLAS_OP_N, HIPBLAS_OP_N,
cur_params->C_out, 1, batch_size,
&Salpha,
(float *)dlayer_input[i + 1], cur_params->C_out,
(float *)one_vec, batch_size,
&Sbeta,
(float *)cur_params->db, cur_params->C_out));
// weight backward
checkCUBLAS(hipblasSgemm(cublas_handle,
HIPBLAS_OP_N, HIPBLAS_OP_T,
cur_params->C_out, cur_params->C_in, batch_size,
&Salpha,
(float *)dlayer_input[i + 1], cur_params->C_out,
(float *)layer_input[i], cur_params->C_in,
&Sbeta,
(float *)cur_params->dW, cur_params->C_out));
// data backward
if (i > 0)
checkCUBLAS(hipblasSgemm(cublas_handle,
HIPBLAS_OP_T, HIPBLAS_OP_N,
cur_params->C_in, batch_size, cur_params->C_out,
&Salpha,
(float *)cur_params->W, cur_params->C_out,
(float *)dlayer_input[i + 1], cur_params->C_out,
&Sbeta,
(float *)dlayer_input[i], cur_params->C_in));
}
else if (data_type == CUDNN_DATA_DOUBLE) {
// bias backward
checkCUBLAS(hipblasDgemm(cublas_handle,
HIPBLAS_OP_N, HIPBLAS_OP_N,
cur_params->C_out, 1, batch_size,
&Dalpha,
(double *)dlayer_input[i + 1], cur_params->C_out,
(double *)one_vec, batch_size,
&Dbeta,
(double *)cur_params->db, cur_params->C_out));
// weight backward
checkCUBLAS(hipblasDgemm(cublas_handle,
HIPBLAS_OP_N, HIPBLAS_OP_T,
cur_params->C_out, cur_params->C_in, batch_size,
&Dalpha,
(double *)dlayer_input[i + 1], cur_params->C_out,
(double *)layer_input[i], cur_params->C_in,
&Dbeta,
(double *)cur_params->dW, cur_params->C_out));
// data backward
if (i > 0)
checkCUBLAS(hipblasDgemm(cublas_handle,
HIPBLAS_OP_T, HIPBLAS_OP_N,
cur_params->C_in, batch_size, cur_params->C_out,
&Dalpha,
(double *)cur_params->W, cur_params->C_out,
(double *)dlayer_input[i + 1], cur_params->C_out,
&Dbeta,
(double *)dlayer_input[i], cur_params->C_in));
}
cur_params->stepParams(cublas_handle, learning_rate);
}
else if (layer_type[i] == DROPOUT) {
DropoutLayerParams *cur_params = (DropoutLayerParams *)params[i];
checkCUDNN(cudnnDropoutBackward(cudnn_handle, cur_params->dropout_desc,
cur_params->input_tensor, dlayer_input[i + 1],
cur_params->input_tensor, dlayer_input[i],
cur_params->reserved_space, cur_params->reserved_space_size));
}
else if (layer_type[i] == BATCHNORM) {
BatchNormLayerParams *cur_params = (BatchNormLayerParams *)params[i];
if (!pre_alloc_batch_norm_derivative) {
cur_params->cnmemAllocDerivatives(data_type_size, NULL);
space_tracker.updateSpace(CnmemSpace::SUB, cur_params->allocation_size * data_type_size);
space_tracker.updateSpace(CnmemSpace::SUB, cur_params->allocation_size * data_type_size);
}
checkCUDNN(cudnnBatchNormalizationBackward(cudnn_handle, cur_params->mode,
&alpha, &beta,
&alpha, &beta,
cur_params->input_tensor, layer_input[i],
cur_params->input_tensor, dlayer_input[i + 1],
cur_params->input_tensor, dlayer_input[i],
cur_params->sbmv_desc, cur_params->scale,
cur_params->dscale, cur_params->dbias,
cur_params->epsilon,
cur_params->result_save_mean, cur_params->result_save_inv_var));
cur_params->stepParams(cublas_handle, learning_rate);
}
else if (layer_type[i] == POOLING) {
PoolingLayerParams *cur_params = (PoolingLayerParams *)params[i];
checkCUDNN(cudnnPoolingBackward(cudnn_handle, cur_params->pool_desc, &alpha,
cur_params->output_tensor, layer_input[i + 1],
cur_params->output_tensor, dlayer_input[i + 1],
cur_params->input_tensor, layer_input[i],
&beta,
cur_params->input_tensor, dlayer_input[i]));
}
else if (layer_type[i] == ACTV) {
ActivationLayerParams *cur_params = (ActivationLayerParams *)params[i];
checkCUDNN(cudnnActivationBackward(cudnn_handle, cur_params->actv_desc, &alpha,
cur_params->input_tensor, layer_input[i + 1],
cur_params->input_tensor, dlayer_input[i + 1],
cur_params->input_tensor, layer_input[i],
&beta,
cur_params->input_tensor, dlayer_input[i]));
continue;
}
else if (layer_type[i] == SOFTMAX) {
// std::cout << "compute here\n";
SoftmaxLayerParams *cur_params = (SoftmaxLayerParams *)params[i];
checkCUDNN(cudnnSoftmaxBackward(cudnn_handle, cur_params->algo, cur_params->mode, &alpha,
cur_params->input_tensor, layer_input[i + 1],
cur_params->input_tensor, dlayer_input[i + 1],
&beta,
cur_params->input_tensor, dlayer_input[i]));
// std::cout << "compute here\n";
continue;
}
// ---------------------- vDNN start ----------------------
// checkCudaErrors(hipDeviceSynchronize());
struct timespec start_time, end_time;
checkCudaErrors(hipStreamSynchronize(stream_compute));
if (train)
clock_gettime(CLOCK_MONOTONIC, &start_time);
checkCudaErrors(hipStreamSynchronize(stream_memory));
if (train) {
clock_gettime(CLOCK_MONOTONIC, &end_time);
float lag = (end_time.tv_sec - start_time.tv_sec) * 1e3 + (end_time.tv_nsec - start_time.tv_nsec) * 1e-6;
bwd_vdnn_lag.insert(bwd_vdnn_lag.begin(), lag);
}
if (layer_type[i] == CONV) {
checkCNMEM(cnmemFree(cur_workspace, NULL));
space_tracker.updateSpace(CnmemSpace::ADD, cur_workspace_size);
if (!pre_alloc_conv_derivative) {
ConvLayerParams *cur_params = (ConvLayerParams *)params[i];
cur_params->cnmemFreeDerivatives(NULL);
space_tracker.updateSpace(CnmemSpace::ADD, cur_params->kernel_size * data_type_size);
space_tracker.updateSpace(CnmemSpace::ADD, cur_params->C_out * data_type_size);
}
}
else if (layer_type[i] == FULLY_CONNECTED) {
if (!pre_alloc_fc_derivative) {
FCLayerParams *cur_params = (FCLayerParams *)params[i];
cur_params->cnmemFreeDerivatives(NULL);
space_tracker.updateSpace(CnmemSpace::ADD, cur_params->weight_matrix_size * data_type_size);
space_tracker.updateSpace(CnmemSpace::ADD, cur_params->C_out * data_type_size);
}
}
else if (layer_type[i] == BATCHNORM) {
if (train == true and !pre_alloc_batch_norm_derivative) {
BatchNormLayerParams *cur_params = (BatchNormLayerParams *)params[i];
cur_params->cnmemFreeDerivatives(NULL);
space_tracker.updateSpace(CnmemSpace::ADD, cur_params->allocation_size * data_type_size);
space_tracker.updateSpace(CnmemSpace::ADD, cur_params->allocation_size * data_type_size);
}
}
checkCNMEM(cnmemFree(layer_input[i + 1], NULL));
space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[i + 1] * data_type_size);
checkCNMEM(cnmemFree(dlayer_input[i + 1], NULL));
space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[i + 1] * data_type_size);
if (i == 0) {
checkCNMEM(cnmemFree(layer_input[i], NULL));
space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[i] * data_type_size);
}
// ---------------------- vDNN end ------------------------
}
if (space_tracker.getConsumed() != 0) {
std::cout << "Panic!! Space not updated properly\n";
}
// exit(0);
}
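// Scans backwards from cur_layer - 1 and returns the nearest layer whose input was
// offloaded but not yet prefetched, marking it as prefetched; returns -1 once a
// CONV layer that needs no prefetch is reached, or if nothing remains to prefetch.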
int NeuralNet::findPrefetchLayer(int cur_layer) {
for (int i = cur_layer - 1; i >= 0; i--) {
if (to_offload[i] && !prefetched[i]) {
prefetched[i] = true;
return i;
}
else if (layer_type[i] == CONV) {
return -1;
}
}
return -1;
}
| b8c7be0c579dc01b4b0f287fd0402cef6fc8fd7f.cu | #include "neural_net.h"
#include <time.h>
#include <cstdio>
#include <string>
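// Gradient of the batch-averaged cross-entropy loss with respect to the softmax
// output: one thread per sample writes -1 / (p_true * batch_size + eps) at the
// true-class position. The caller zeroes dSO beforehand, so all other entries stay 0.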
template <typename T>
__global__ void softmaxLossBackProp(int *y, T *SO, T *dSO, int batch_size, int output_size, float eps) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= batch_size)
return;
int cur_class = static_cast<int>(y[i]);
dSO[i * output_size + cur_class] = -1 / (SO[i * output_size + cur_class] * batch_size + eps);
}
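// Per-sample negative log-likelihood of the true class (one thread per sample).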
template <typename T>
__global__ void computeSoftmaxLoss(T *O, int *y, float *loss, int batch_size, int num_classes, float eps) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= batch_size)
return;
loss[i] = -logf(O[i * num_classes + y[i]] + eps);
}
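// Per-sample argmax over the class scores, producing the predicted label.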
template <typename T>
__global__ void inferClass(T *O, int *pred_y, int batch_size, int num_classes) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= batch_size)
return;
T max = O[i * num_classes];
int index = 0;
for (int j = 1; j < num_classes; j++) {
if (O[i * num_classes + j] > max) {
max = O[i * num_classes + j];
index = j;
}
}
pred_y[i] = index;
}
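// Launches computeSoftmaxLoss on the final layer output, copies the per-sample
// losses back to the host and returns their mean over the batch.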
float NeuralNet::computeLoss() {
if (layer_type[num_layers - 1] == SOFTMAX) {
if (data_type == CUDNN_DATA_FLOAT)
computeSoftmaxLoss<float><<<ceil(1.0 * batch_size / BW), BW>>>((float *)layer_input[num_layers], this->y, loss, batch_size, num_classes, softmax_eps);
else if (data_type == CUDNN_DATA_DOUBLE)
computeSoftmaxLoss<double><<<ceil(1.0 * batch_size / BW), BW>>>((double *)layer_input[num_layers], this->y, loss, batch_size, num_classes, softmax_eps);
}
checkCudaErrors(cudaMemcpy(h_loss, loss, batch_size * sizeof(float), cudaMemcpyDeviceToHost));
float total_loss = 0.0;
for (int i = 0; i < batch_size; i++)
total_loss += h_loss[i];
return total_loss / batch_size;
}
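// Runs inferClass on the network output and counts how many predicted labels
// match the ground-truth labels y.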
void NeuralNet::compareOutputCorrect(int *correct_count, int *y) {
*correct_count = 0;
if (data_type == CUDNN_DATA_FLOAT) {
float *typecast_O = (float *)layer_input[num_layers - 1];
inferClass<float><<<ceil(1.0 * batch_size / BW), BW>>>(typecast_O, pred_y, batch_size, num_classes);
checkCudaErrors(cudaMemcpy(h_pred_y, pred_y, batch_size * sizeof(int), cudaMemcpyDeviceToHost));
for (int i = 0; i < batch_size; i++) {
if (h_pred_y[i] == y[i])
*correct_count = *correct_count + 1;
}
}
else if (data_type == CUDNN_DATA_DOUBLE) {
double *typecast_O = (double *)layer_input[num_layers - 1];
inferClass<double><<<ceil(1.0 * batch_size / BW), BW>>>(typecast_O, pred_y, batch_size, num_classes);
checkCudaErrors(cudaMemcpy(h_pred_y, pred_y, batch_size * sizeof(int), cudaMemcpyDeviceToHost));
for (int i = 0; i < batch_size; i++) {
if (h_pred_y[i] == y[i])
*correct_count = *correct_count + 1;
}
}
}
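// Builds the network: creates the compute and memory streams, the cuDNN, cuBLAS
// and cuRAND handles, instantiates per-layer parameter objects, records per-layer
// input sizes, allocates parameters and small per-batch buffers, and finally runs
// vDNNOptimize() to pick the offload set and conv algorithms before setting up the
// CNMEM pool and the pinned host staging buffers.
//
// Minimal usage sketch (illustrative only: `layers`, `X`, `labels` and the
// update-rule value are assumed to be provided by the caller, they are not part of
// this file):
//   NeuralNet net(layers, DATA_FLOAT, /*batch_size=*/128, TENSOR_NCHW,
//                 /*dropout_seed=*/1, /*softmax_eps=*/1e-8, /*init_std_dev=*/0.01,
//                 vDNN_DYN, vDNN_PERFORMANCE_OPTIMAL, update_rule);
//   float loss; int correct;
//   net.getLoss(X, labels, /*learning_rate=*/0.01, /*train=*/true, &correct, &loss);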
NeuralNet::NeuralNet(std::vector<LayerSpecifier> &layers, DataType data_type, int batch_size, TensorFormat tensor_format,
long long dropout_seed, float softmax_eps, float init_std_dev, vDNNType vdnn_type, vDNNConvAlgo vdnn_conv_algo,
UpdateRule update_rule) {
// ---------------------- vDNN start ----------------------
checkCudaErrors(cudaStreamCreate(&stream_compute));
checkCudaErrors(cudaStreamCreate(&stream_memory));
this->vdnn_type = vdnn_type;
this->vdnn_conv_algo = vdnn_conv_algo;
// ---------------------- vDNN end ------------------------
// create handle
checkCUDNN(cudnnCreate(&cudnn_handle));
checkCUDNN(cudnnSetStream(cudnn_handle, stream_compute));
checkCUBLAS(cublasCreate(&cublas_handle));
checkCUBLAS(cublasSetStream(cublas_handle, stream_compute));
checkCURAND(curandCreateGenerator(&curand_gen, CURAND_RNG_PSEUDO_DEFAULT));
checkCURAND(curandSetStream(curand_gen, stream_compute));
checkCudaErrors(cudaMemGetInfo(&free_bytes, &total_bytes));
init_free_bytes = free_bytes;
std::cout << "Free bytes at start: " << free_bytes << std::endl;
pre_alloc_conv_derivative = false;
pre_alloc_fc_derivative = false;
pre_alloc_batch_norm_derivative = true;
if (vdnn_type == vDNN_NONE) {
pre_alloc_conv_derivative = true;
pre_alloc_fc_derivative = true;
pre_alloc_batch_norm_derivative = true;
}
if (data_type == DATA_FLOAT) {
this->data_type = CUDNN_DATA_FLOAT;
data_type_size = sizeof(float);
}
else if (data_type == DATA_DOUBLE) {
this->data_type = CUDNN_DATA_DOUBLE;
data_type_size = sizeof(double);
}
if (tensor_format == TENSOR_NCHW)
this->tensor_format = CUDNN_TENSOR_NCHW;
else if (tensor_format == TENSOR_NHWC)
this->tensor_format = CUDNN_TENSOR_NHWC;
this->batch_size = batch_size;
this->softmax_eps = softmax_eps;
this->init_std_dev = init_std_dev;
num_layers = layers.size();
// allocation of space for input to each layer
layer_input = (void **)malloc((num_layers + 1) * sizeof(void *));
layer_input_size = (int *)malloc((num_layers + 1) * sizeof(int));
dlayer_input = (void **)malloc((num_layers + 1) * sizeof(void *));
params = (void **)malloc(num_layers * sizeof(void *));
LayerDimension prev_output_size;
LayerDimension current_output_size;
for (int i = 0; i < num_layers; i++) {
layer_type.push_back(layers[i].type);
if (layers[i].type == CONV) {
ConvDescriptor *user_params = (ConvDescriptor *)layers[i].params;
params[i] = malloc(sizeof(ConvLayerParams));
((ConvLayerParams *)params[i])->initializeValues(cudnn_handle, user_params, this->data_type, batch_size, this->tensor_format,
data_type_size, current_output_size, update_rule);
}
else if (layers[i].type == FULLY_CONNECTED) {
FCDescriptor *user_params = (FCDescriptor *)layers[i].params;
params[i] = malloc(sizeof(FCLayerParams));
((FCLayerParams *)params[i])->initializeValues(user_params, batch_size, this->tensor_format, this->data_type,
current_output_size, update_rule);
}
else if (layers[i].type == DROPOUT) {
DropoutDescriptor *user_params = (DropoutDescriptor *)layers[i].params;
params[i] = malloc(sizeof(DropoutLayerParams));
((DropoutLayerParams *)params[i])->initializeValues(cudnn_handle, user_params, this->data_type, batch_size,
this->tensor_format, current_output_size);
}
else if (layers[i].type == BATCHNORM) {
BatchNormDescriptor *user_params = (BatchNormDescriptor *)layers[i].params;
params[i] = malloc(sizeof(BatchNormLayerParams));
((BatchNormLayerParams *)params[i])->initializeValues(user_params, this->data_type, this->tensor_format, batch_size,
current_output_size, update_rule);
}
else if (layers[i].type == POOLING) {
PoolingDescriptor *user_params = (PoolingDescriptor *)layers[i].params;
params[i] = malloc(sizeof(PoolingLayerParams));
((PoolingLayerParams *)params[i])->initializeValues(user_params, this->data_type, this->tensor_format,
batch_size, current_output_size);
}
else if (layers[i].type == ACTV) {
ActivationDescriptor *user_params = (ActivationDescriptor *)layers[i].params;
params[i] = malloc(sizeof(ActivationLayerParams));
((ActivationLayerParams *)params[i])->initializeValues(user_params, this->data_type, this->tensor_format,
batch_size, current_output_size);
}
else if (layers[i].type == SOFTMAX) {
SoftmaxDescriptor *user_params = (SoftmaxDescriptor *)layers[i].params;
params[i] = malloc(sizeof(SoftmaxLayerParams));
((SoftmaxLayerParams *)params[i])->initializeValues(user_params, this->data_type, this->tensor_format,
batch_size, current_output_size);
// std::cout << current_output_size.N << ' ' << current_output_size.C << current_output_size.H << current_output_size.W << std::endl;
}
if (i == 0) {
prev_output_size = current_output_size;
}
// incomplete - have to check flatten and check exact dimension
// else if (current_output_size.getTotalSize() != prev_output_size.getTotalSize()) {
// std::cout << "Layer " << i << " output and next layer's input size mismatch\n";
// exit(0);
// }
}
// ---------------------- vDNN start ----------------------
// allocate space in host memory for layers to be transferred
h_layer_input = (void **)malloc(num_layers * sizeof(void *));
to_offload = (bool *)malloc(num_layers * sizeof(bool));
prefetched = (bool *)malloc(num_layers * sizeof(bool));
// ---------------------- vDNN end ------------------------
checkCudaErrors(cudaMemGetInfo(&free_bytes, &total_bytes));
std::cout << "Free bytes just before allocate space: " << free_bytes << std::endl;
// allocate space for parameters
// Exception: BatchNorm - looks like it will take a lot of space even when the network has only FC layers (space taken = size of one input)
for (int i = 0; i < num_layers; i++) {
size_t input_size;
if (layers[i].type == CONV) {
ConvDescriptor *user_params = (ConvDescriptor *)layers[i].params;
((ConvLayerParams *)params[i])->allocateSpace(curand_gen, this->data_type, data_type_size, init_std_dev,
free_bytes, pre_alloc_conv_derivative);
input_size = batch_size * user_params->input_channels * user_params->input_h * user_params->input_w;
if (i == 0) {
input_channels = user_params->input_channels;
input_h = user_params->input_h;
input_w = user_params->input_w;
}
}
else if (layers[i].type == FULLY_CONNECTED) {
FCDescriptor *user_params = (FCDescriptor *)layers[i].params;
((FCLayerParams *)params[i])->allocateSpace(curand_gen, this->data_type, data_type_size, init_std_dev,
free_bytes, pre_alloc_fc_derivative);
input_size = batch_size * user_params->input_channels;
if (i == 0) {
input_channels = user_params->input_channels;
input_h = 1;
input_w = 1;
}
}
else if (layers[i].type == DROPOUT) {
DropoutDescriptor *user_params = (DropoutDescriptor *)layers[i].params;
((DropoutLayerParams *)params[i])->allocateSpace(free_bytes, cudnn_handle, user_params, dropout_seed);
input_size = batch_size * user_params->channels * user_params->h * user_params->w;
if (i == 0) {
input_channels = user_params->channels;
input_h = user_params->h;
input_w = user_params->w;
}
}
else if (layers[i].type == BATCHNORM) {
BatchNormDescriptor *user_params = (BatchNormDescriptor *)layers[i].params;
((BatchNormLayerParams *)params[i])->allocateSpace(this->data_type, data_type_size,
free_bytes, pre_alloc_batch_norm_derivative);
input_size = batch_size * user_params->channels * user_params->h * user_params->w;
if (i == 0) {
input_channels = user_params->channels;
input_h = user_params->h;
input_w = user_params->w;
}
}
else if (layers[i].type == POOLING) {
PoolingDescriptor *user_params = (PoolingDescriptor *)layers[i].params;
((PoolingLayerParams *)params[i])->allocateSpace(free_bytes);
input_size = batch_size * user_params->input_channels * user_params->input_h * user_params->input_w;
if (i == 0) {
input_channels = user_params->input_channels;
input_h = user_params->input_h;
input_w = user_params->input_w;
}
}
else if (layers[i].type == ACTV) {
ActivationDescriptor *user_params = (ActivationDescriptor *)layers[i].params;
((ActivationLayerParams *)params[i])->allocateSpace(free_bytes);
input_size = batch_size * user_params->channels * user_params->h * user_params->w;
if (i == 0) {
input_channels = user_params->channels;
input_h = user_params->h;
input_w = user_params->w;
}
}
else if (layers[i].type == SOFTMAX) {
SoftmaxDescriptor *user_params = (SoftmaxDescriptor *)layers[i].params;
((SoftmaxLayerParams *)params[i])->allocateSpace(free_bytes);
input_size = batch_size * user_params->channels * user_params->h * user_params->w;
// assuming this is last layer, allocate for next layer as well
// checkCudaErrors(cudaMalloc(&layer_input[i + 1], input_size * data_type_size));
// checkCudaErrors(cudaMalloc(&dlayer_input[i + 1], input_size * data_type_size));
layer_input_size[i + 1] = input_size;
if (i == 0) {
input_channels = user_params->channels;
input_h = user_params->h;
input_w = user_params->w;
}
if (i == num_layers - 1) {
num_classes = user_params->channels;
}
}
// do not allocate memory initially
// checkCudaErrors(cudaMalloc(&layer_input[i], input_size * data_type_size));
// checkCudaErrors(cudaMalloc(&dlayer_input[i], input_size * data_type_size));
// ---------------------- vDNN start ----------------------
layer_input_size[i] = input_size;
// ---------------------- vDNN end ------------------------
}
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaMemGetInfo(&free_bytes, &total_bytes));
std::cout << "Free bytes just after allocate space: " << free_bytes << std::endl;
// very small - could be allocated initially itself
checkCudaErrors(cudaMalloc((void **)&y, batch_size * sizeof(int)));
checkCudaErrors(cudaMalloc((void **)&pred_y, batch_size * sizeof(int)));
checkCudaErrors(cudaMalloc((void **)&loss, batch_size * sizeof(float)));
checkCudaErrors(cudaMalloc(&one_vec, batch_size * data_type_size));
if (this->data_type == CUDNN_DATA_FLOAT)
fillValue<float><<<ceil(1.0 * batch_size / BW), BW>>>((float *)one_vec, batch_size, 1);
else
fillValue<double><<<ceil(1.0 * batch_size / BW), BW>>>((double *)one_vec, batch_size, 1);
checkCudaErrors(cudaMallocHost((void **)&h_loss, batch_size * sizeof(float)));
checkCudaErrors(cudaMallocHost((void **)&h_pred_y, batch_size * sizeof(int)));
// do not allocate workspace initially
// allocate space for workspace and also keep track of algo
// size_t cur_workspace_size;
// workspace_size = 0;
// for (int i = 0; i < num_layers; i++) {
// if (layers[i].type == CONV) {
// ((ConvLayerParams *)params[i])->getWorkspaceSize(cur_workspace_size, free_bytes);
// if (cur_workspace_size > workspace_size)
// workspace_size = cur_workspace_size;
// }
// }
// checkCudaErrors(cudaMalloc(&workspace, workspace_size));
// free_bytes = free_bytes - workspace_size;
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaMemGetInfo(&free_bytes, &total_bytes));
// leave 600 MB and use the rest
std::cout << "Free bytes: " << free_bytes << std::endl;
// ---------------------- vDNN start ----------------------
size_t exp_max_consume, max_consume;
vDNNOptimize(exp_max_consume, max_consume);
std::cout << "actual_max_consume: " << max_consume << std::endl;
std::cout << "exp_max_consume: " << exp_max_consume << std::endl;
std::cout << "diff_max_consume(MB): " << (max_consume - exp_max_consume) / (1.0 * 1024 * 1024) << std::endl;
std::cout << "exp_free_bytes(MB): " << (free_bytes - exp_max_consume) / (1.0 * 1024 * 1024) << std::endl;
std::cout << "exp_total_consume(MB): " << (init_free_bytes - (free_bytes - exp_max_consume)) / (1.0 * 1024 * 1024) << std::endl;
std::cout << "actual_total_consume(MB): " << (init_free_bytes - (free_bytes - max_consume)) / (1.0 * 1024 * 1024) << std::endl;
// ---------------------- vDNN end ------------------------
// remove later
exit(0);
// ---------------------- vDNN start ----------------------
free_bytes = max_consume;
cnmemDevice_t cnmem_device;
size_t cnmem_stream_memory_size = free_bytes;
cnmem_device.device = 0;
cnmem_device.size = cnmem_stream_memory_size;
cnmem_device.numStreams = 0;
cnmem_device.streams = NULL;
cnmem_device.streamSizes = NULL;
// do not allow call to cudaMalloc
checkCNMEM(cnmemInit(1, &cnmem_device, CNMEM_FLAGS_CANNOT_GROW));
// ---------------------- vDNN end ------------------------
// ---------------------- vDNN start ----------------------
for (int i = 0; i < num_layers; i++) {
std::cerr << "to_offload[i] " << to_offload[i] << std::endl;
}
for (int i = 0; i < num_layers; i++) {
// allocate pinned memory in host
if (to_offload[i])
checkCudaErrors(cudaMallocHost(&h_layer_input[i], layer_input_size[i] * data_type_size));
}
// ---------------------- vDNN end ------------------------
checkCudaErrors(cudaDeviceSynchronize());
size_t temp_free_bytes;
checkCudaErrors(cudaMemGetInfo(&temp_free_bytes, &total_bytes));
std::cout << "Free bytes just before end of NeuralNet: " << temp_free_bytes << std::endl;
// {
// int n;
// std::cout << "waiting..\n";
// std::cin >> n;
// }
// CUDA events for timing the compute and memory-transfer streams
checkCudaErrors(cudaEventCreate(&start_compute));
checkCudaErrors(cudaEventCreate(&stop_compute));
checkCudaErrors(cudaEventCreate(&start_transfer));
checkCudaErrors(cudaEventCreate(&stop_transfer));
}
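// simulateNeuralNetworkMemory: dry-runs one forward and one backward pass, mirroring the
// allocations and frees that getLoss() performs, and records the peak device-memory
// consumption in max_consume using a CnmemSpace tracker. Returns false if the schedule
// does not fit in the currently free bytes; otherwise the estimate is validated against
// the real CNMeM allocator via simulateCNMEMMemory().
//
// A minimal sketch of the CnmemSpace interface assumed here (an illustration only - the
// actual class is defined in the project headers and may differ, e.g. it may report MB):
//
// struct CnmemSpace {
// enum Op { ADD, SUB };
// long free_bytes, initial_free_bytes;
// CnmemSpace(long free) : free_bytes(free), initial_free_bytes(free) {}
// void updateSpace(Op op, long bytes) { free_bytes += (op == ADD ? bytes : -bytes); }
// bool isAvailable() { return free_bytes >= 0; }
// long getConsumed() { return initial_free_bytes - free_bytes; }
// void updateMaxConsume(size_t &max_consume) {
// if (getConsumed() > (long)max_consume) max_consume = getConsumed();
// }
// };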
bool NeuralNet::simulateNeuralNetworkMemory(vDNNConvAlgoPref algo_pref, bool hard, size_t &exp_max_consume, size_t &max_consume) {
CnmemSpace space_tracker(free_bytes);
max_consume = 0;
// forward pass
// allocate space for 1st input
std::cerr << "Initial Used space(MB): " << space_tracker.getConsumed() << std::endl;
space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[0] * data_type_size);
space_tracker.updateMaxConsume(max_consume);
std::cerr << "Used space after allocating input(MB): " << space_tracker.getConsumed() << std::endl;
std::cerr << "Forward pass" << std::endl;
for (int i = 0; i < num_layers; i++) {
if (layer_type[i] == SOFTMAX)
break;
std::cerr << "Processing layer " << i << std::endl;
std::cerr << "Initial Used space(MB): " << space_tracker.getConsumed() << std::endl;
space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[i + 1] * data_type_size);
std::cerr << "Used space after output allocation(MB): " << space_tracker.getConsumed() << std::endl;
space_tracker.updateMaxConsume(max_consume);
if (layer_type[i] == CONV) {
ConvLayerParams *cur_params = (ConvLayerParams *)params[i];
size_t cur_workspace_size;
checkWORKSPACE(cur_params->getWorkspaceSize(space_tracker.free_bytes, ConvLayerParams::FWD, algo_pref, hard, cur_workspace_size));
space_tracker.updateSpace(CnmemSpace::SUB, cur_workspace_size);
space_tracker.updateMaxConsume(max_consume);
if (!space_tracker.isAvailable())
return false;
std::cerr << "Used space after workspace allocation(MB): " << space_tracker.getConsumed() << std::endl;
// current layer computation over, deallocate workspace
space_tracker.updateSpace(CnmemSpace::ADD, cur_workspace_size);
std::cerr << "Used space after workspace deallocation(MB): " << space_tracker.getConsumed() << std::endl;
}
if (!space_tracker.isAvailable())
return false;
// deallocate layer input
if (to_offload[i]) {
std::cerr << "deallocating input to " << i << std::endl;
space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[i] * data_type_size);
std::cerr << "Used space after deallocating input(MB): " << space_tracker.getConsumed() << std::endl;
}
}
std::cerr << "Backward pass" << std::endl;
if (batch_size * num_classes * data_type_size != layer_input_size[num_layers] * data_type_size) {
std::cout << "Panic!! Using wrong size\n";
exit(0);
}
// backward pass
space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[num_layers] * data_type_size);
std::cerr << "Used space after allocating final derivative(MB): " << space_tracker.getConsumed() << std::endl;
space_tracker.updateMaxConsume(max_consume);
// std::cerr << "max_consume: " << max_consume << std::endl;
for (int i = num_layers - 1; i >= 0; i--) {
// allocate space for previous layer derivative
std::cerr << "Processing layer " << i << std::endl;
std::cerr << "Used space initial(MB): " << space_tracker.getConsumed() << std::endl;
if (i > 0) {
if (layer_type[i] == SOFTMAX)
continue;
else {
space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[i] * data_type_size);
std::cerr << "Used space after allocating prev. derivative(MB): " << space_tracker.getConsumed() << std::endl;
space_tracker.updateMaxConsume(max_consume);
}
// std::cerr << "max_consume: " << max_consume << std::endl;
}
int layer_to_prefetch = findPrefetchLayer(i);
// if layer to be prefetched, allocate space for that layer
if (layer_to_prefetch != -1) {
std::cerr << "Prefetch layer " << layer_to_prefetch << std::endl;
space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[layer_to_prefetch] * data_type_size);
std::cerr << "Used space after allocating prefetch(MB): " << space_tracker.getConsumed() << std::endl;
space_tracker.updateMaxConsume(max_consume);
}
if (layer_type[i] == CONV) {
ConvLayerParams *cur_params = (ConvLayerParams *)params[i];
size_t cur_filter_workspace_size;
checkWORKSPACE(cur_params->getWorkspaceSize(space_tracker.free_bytes, ConvLayerParams::BWD_FILTER, algo_pref, hard, cur_filter_workspace_size));
size_t cur_data_workspace_size = 0;
if (i > 0)
checkWORKSPACE(cur_params->getWorkspaceSize(space_tracker.free_bytes, ConvLayerParams::BWD_DATA, algo_pref, hard, cur_data_workspace_size));
size_t cur_workspace_size = (cur_filter_workspace_size > cur_data_workspace_size) ? cur_filter_workspace_size : cur_data_workspace_size;
space_tracker.updateSpace(CnmemSpace::SUB, cur_workspace_size);
std::cerr << "Used space after allocating workspace(MB): " << space_tracker.getConsumed() << std::endl;
space_tracker.updateMaxConsume(max_consume);
if (!pre_alloc_conv_derivative) {
space_tracker.updateSpace(CnmemSpace::SUB, cur_params->kernel_size * data_type_size);
space_tracker.updateSpace(CnmemSpace::SUB, cur_params->C_out * data_type_size);
space_tracker.updateMaxConsume(max_consume);
std::cerr << "Used space after allocating weight derv.(MB): " << space_tracker.getConsumed() << std::endl;
}
// std::cerr << "max_consume: " << max_consume << std::endl;
if (!space_tracker.isAvailable())
return false;
// current layer computation over, deallocate workspace
space_tracker.updateSpace(CnmemSpace::ADD, cur_workspace_size);
std::cerr << "Used space after deallocating workspace(MB): " << space_tracker.getConsumed() << std::endl;
if (!pre_alloc_conv_derivative) {
space_tracker.updateSpace(CnmemSpace::ADD, cur_params->kernel_size * data_type_size);
space_tracker.updateSpace(CnmemSpace::ADD, cur_params->C_out * data_type_size);
std::cerr << "Used space after deallocating weight derv.(MB): " << space_tracker.getConsumed() << std::endl;
}
}
else if (layer_type[i] == FULLY_CONNECTED) {
FCLayerParams *cur_params = (FCLayerParams *)params[i];
if (!pre_alloc_fc_derivative) {
space_tracker.updateSpace(CnmemSpace::SUB, cur_params->weight_matrix_size * data_type_size);
space_tracker.updateSpace(CnmemSpace::SUB, cur_params->C_out * data_type_size);
space_tracker.updateMaxConsume(max_consume);
std::cerr << "Used space after allocating weight derv.(MB): " << space_tracker.getConsumed() << std::endl;
}
if (!space_tracker.isAvailable())
return false;
if (!pre_alloc_fc_derivative) {
space_tracker.updateSpace(CnmemSpace::ADD, cur_params->weight_matrix_size * data_type_size);
space_tracker.updateSpace(CnmemSpace::ADD, cur_params->C_out * data_type_size);
std::cerr << "Used space after deallocating weight derv.(MB): " << space_tracker.getConsumed() << std::endl;
}
}
else if (layer_type[i] == BATCHNORM) {
BatchNormLayerParams *cur_params = (BatchNormLayerParams *)params[i];
if (!pre_alloc_batch_norm_derivative) {
space_tracker.updateSpace(CnmemSpace::SUB, cur_params->allocation_size * data_type_size);
space_tracker.updateSpace(CnmemSpace::SUB, cur_params->allocation_size * data_type_size);
space_tracker.updateMaxConsume(max_consume);
std::cerr << "Used space after allocating weight derv.(MB): " << space_tracker.getConsumed() << std::endl;
}
if (!space_tracker.isAvailable())
return false;
if (!pre_alloc_batch_norm_derivative) {
space_tracker.updateSpace(CnmemSpace::ADD, cur_params->allocation_size * data_type_size);
space_tracker.updateSpace(CnmemSpace::ADD, cur_params->allocation_size * data_type_size);
std::cerr << "Used space after deallocating weight derv.(MB): " << space_tracker.getConsumed() << std::endl;
}
}
if (!space_tracker.isAvailable())
return false;
// deallocate layer output and derivative
space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[i + 1] * data_type_size);
space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[i + 1] * data_type_size);
std::cerr << "Used space after deallocating output, derivative(MB): " << space_tracker.getConsumed() << std::endl;
// if 1st layer, deallocate input layer also
if (i == 0) {
space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[i] * data_type_size);
std::cerr << "Used space after deallocating input(MB): " << space_tracker.getConsumed() << std::endl;
}
}
if (space_tracker.getConsumed() < 0)
std::cerr << "Panic!! more free bytes than at the start\n";
if (space_tracker.getConsumed() > 0)
std::cerr << "Panic!! bytes not freed properly\n";
// return true;
exp_max_consume = max_consume;
// check with cnmem once
bool ret_val = simulateCNMEMMemory(max_consume);
return ret_val;
}
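// simulateCNMEMMemory: replays the same allocation schedule against a real CNMeM pool of
// size max_consume (keeping a 100 MB reserve out of the free bytes). When an allocation
// fails, the checkCNMEMSim* wrappers appear to grow max_consume and trigger the supplied
// retry/break action, so the loop re-initializes the pool and tries again until the
// schedule fits or max_consume exceeds the available memory. The allocator state after
// every step is logged to a cnmem_*.dat file for inspection.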
bool NeuralNet::simulateCNMEMMemory(size_t &max_consume) {
size_t init_max_consume = max_consume;
cnmemDevice_t cnmem_device;
size_t t;
checkCudaErrors(cudaMemGetInfo(&free_bytes, &t));
std::cout << "free_bytes: " << free_bytes << std::endl;
free_bytes -= 100 * 1024 * 1024;
cnmem_device.device = 0;
cnmem_device.numStreams = 0;
cnmem_device.streams = NULL;
cnmem_device.streamSizes = NULL;
std::string cnmem_memory_state_filename;
if (vdnn_type == vDNN_ALL) {
if (vdnn_conv_algo == vDNN_PERFORMANCE_OPTIMAL) {
cnmem_memory_state_filename = "cnmem_all_p.dat";
}
else if (vdnn_conv_algo == vDNN_MEMORY_OPTIMAL) {
cnmem_memory_state_filename = "cnmem_all_m.dat";
}
}
else if (vdnn_type == vDNN_CONV) {
if (vdnn_conv_algo == vDNN_PERFORMANCE_OPTIMAL) {
cnmem_memory_state_filename = "cnmem_conv_p.dat";
}
else if (vdnn_conv_algo == vDNN_MEMORY_OPTIMAL) {
cnmem_memory_state_filename = "cnmem_conv_m.dat";
}
}
else if (vdnn_type == vDNN_DYN) {
cnmem_memory_state_filename = "cnmem_dyn.dat";
}
else {
cnmem_memory_state_filename = "cnmem_unknown.dat";
}
FILE *cnmem_memory_state_fptr = fopen(cnmem_memory_state_filename.c_str(), "w");
size_t run_count = 0;
bool out_of_memory = false;
while (true) {
run_count++;
if (max_consume >= free_bytes)
break;
out_of_memory = false;
cnmem_device.size = max_consume;
std::cerr << run_count << ' ' << max_consume << std::endl;
if (max_consume > free_bytes)
std::cerr << "panic!! max_consume > free_bytes\n";
checkCNMEM(cnmemInit(1, &cnmem_device, CNMEM_FLAGS_CANNOT_GROW));
resetPrefetched();
fprintf(cnmem_memory_state_fptr, "//////////////////////////////////////////////////////////////////\n");
fprintf(cnmem_memory_state_fptr, "run_count: %lu\n", run_count);
fprintf(cnmem_memory_state_fptr, "max_consume: %lu\n", max_consume);
fprintf(cnmem_memory_state_fptr, "//////////////////////////////////////////////////////////////////\n");
fprintf(cnmem_memory_state_fptr, "initial state\n");
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
if (to_offload[0]) {
checkCNMEMSim(cnmemMalloc(&layer_input[0], layer_input_size[0] * data_type_size, NULL),
layer_input_size[0] * data_type_size, max_consume, free_bytes, checkCNMEM(cnmemFinalize()); continue, out_of_memory);
}
else {
checkCNMEMSim(cnmemMallocRight(&layer_input[0], layer_input_size[0] * data_type_size, NULL),
layer_input_size[0] * data_type_size, max_consume, free_bytes, checkCNMEM(cnmemFinalize()); continue, out_of_memory);
}
fprintf(cnmem_memory_state_fptr, "after alloc. layer_input[%d] - size: %lu\n", 0, layer_input_size[0] * data_type_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
// forward propagate
for (int i = 0; i < num_layers; i++) {
size_t cur_workspace_size;
void *cur_workspace;
if (to_offload[i + 1]) {
checkCNMEMSim(cnmemMalloc(&layer_input[i + 1], layer_input_size[i + 1] * data_type_size, NULL),
layer_input_size[i + 1] * data_type_size, max_consume, free_bytes, break, out_of_memory);
}
else {
checkCNMEMSimRight(cnmemMallocRight(&layer_input[i + 1], layer_input_size[i + 1] * data_type_size, NULL),
layer_input_size[i + 1] * data_type_size, max_consume, free_bytes, break, out_of_memory);
}
fprintf(cnmem_memory_state_fptr, "after alloc. layer_input[%d] - size: %lu\n", i + 1, layer_input_size[i + 1] * data_type_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
if (layer_type[i] == CONV) {
// std::cout << "conv\n";
ConvLayerParams *cur_params = (ConvLayerParams *)params[i];
cur_workspace_size = cur_params->fwd_workspace_size;
checkCNMEMSimRight(cnmemMalloc(&cur_workspace, cur_workspace_size, NULL),
cur_workspace_size, max_consume, free_bytes, break, out_of_memory);
fprintf(cnmem_memory_state_fptr, "after alloc. conv. workspace - size: %lu\n", cur_workspace_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
}
if (layer_type[i] == CONV) {
checkCNMEMSim(cnmemFree(cur_workspace, NULL),
cur_workspace_size, max_consume, free_bytes, break, out_of_memory);
fprintf(cnmem_memory_state_fptr, "after free conv. workspace - size: %lu\n", cur_workspace_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
}
if (to_offload[i]) {
checkCNMEMSim(cnmemFree(layer_input[i], NULL),
layer_input_size[i] * data_type_size, max_consume, free_bytes, break, out_of_memory);
fprintf(cnmem_memory_state_fptr, "after free layer_input[%d] - size: %lu\n", i, layer_input_size[i] * data_type_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
}
if (layer_type[i + 1] == ACTV or layer_type[i + 1] == SOFTMAX) {
i = i + 1;
}
}
if (out_of_memory) {
checkCNMEM(cnmemFinalize());
if (max_consume < free_bytes)
continue;
else
break;
}
checkCNMEMSim(cnmemMalloc(&dlayer_input[num_layers], batch_size * num_classes * data_type_size, NULL),
layer_input_size[num_layers] * data_type_size, max_consume, free_bytes, checkCNMEM(cnmemFinalize()); continue, out_of_memory);
fprintf(cnmem_memory_state_fptr, "after alloc. dlayer_input[%d] - size: %lu\n", num_layers, layer_input_size[num_layers] * data_type_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
for (int i = num_layers - 1; i >= 0; i--) {
// ---------------------- vDNN start ----------------------
size_t cur_filter_workspace_size, cur_data_workspace_size, cur_workspace_size;
void *cur_workspace;
if (i > 0) {
if (layer_type[i] == ACTV or layer_type[i] == SOFTMAX) {
dlayer_input[i] = dlayer_input[i + 1];
}
else {
int layer_to_prefetch = findPrefetchLayer(i);
if (layer_to_prefetch != -1) {
checkCNMEMSim(cnmemMalloc(&layer_input[layer_to_prefetch], layer_input_size[layer_to_prefetch] * data_type_size, NULL),
layer_input_size[layer_to_prefetch] * data_type_size, max_consume, free_bytes, break, out_of_memory);
fprintf(cnmem_memory_state_fptr, "after alloc. prefetch layer_input[%d] - size: %lu\n", layer_to_prefetch, layer_input_size[layer_to_prefetch] * data_type_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
}
checkCNMEMSim(cnmemMalloc(&dlayer_input[i], layer_input_size[i] * data_type_size, NULL),
layer_input_size[i] * data_type_size, max_consume, free_bytes, break, out_of_memory);
fprintf(cnmem_memory_state_fptr, "after alloc. dlayer_input[%d] - size: %lu\n", i, layer_input_size[i] * data_type_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
}
}
if (layer_type[i] == CONV) {
// std::cout << "here\n";
ConvLayerParams *cur_params = (ConvLayerParams *)params[i];
// allocate space for derivative
if (!pre_alloc_conv_derivative) {
if (!cur_params->cnmemAllocDerivativesCheck(data_type_size, NULL, max_consume, free_bytes, out_of_memory))
break;
fprintf(cnmem_memory_state_fptr, "after alloc. dW - size: %lu\n", cur_params->kernel_size * data_type_size);
fprintf(cnmem_memory_state_fptr, "after alloc. db - size: %lu\n", cur_params->C_out * data_type_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
}
cur_filter_workspace_size = cur_params->bwd_filter_workspace_size;
if (i > 0)
cur_data_workspace_size = cur_params->bwd_data_workspace_size;
else
cur_data_workspace_size = 0;
cur_workspace_size = (cur_filter_workspace_size > cur_data_workspace_size) ? cur_filter_workspace_size : cur_data_workspace_size;
checkCNMEMSim(cnmemMalloc(&cur_workspace, cur_workspace_size, NULL),
cur_workspace_size, max_consume, free_bytes, break, out_of_memory);
fprintf(cnmem_memory_state_fptr, "after alloc. conv. workspace - size: %lu\n", cur_workspace_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
}
else if (layer_type[i] == FULLY_CONNECTED) {
FCLayerParams *cur_params = (FCLayerParams *)params[i];
if (!pre_alloc_fc_derivative) {
if (!cur_params->cnmemAllocDerivativesCheck(data_type_size, NULL, max_consume, free_bytes, out_of_memory))
break;
fprintf(cnmem_memory_state_fptr, "after alloc. dW - size: %lu\n", cur_params->weight_matrix_size * data_type_size);
fprintf(cnmem_memory_state_fptr, "after alloc. db - size: %lu\n", cur_params->C_out * data_type_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
}
}
else if (layer_type[i] == BATCHNORM) {
BatchNormLayerParams *cur_params = (BatchNormLayerParams *)params[i];
if (!pre_alloc_batch_norm_derivative) {
if (!cur_params->cnmemAllocDerivativesCheck(data_type_size, NULL, max_consume, free_bytes, out_of_memory))
break;
fprintf(cnmem_memory_state_fptr, "after alloc. dscale - size: %lu\n", cur_params->allocation_size * data_type_size);
fprintf(cnmem_memory_state_fptr, "after alloc. dbias - size: %lu\n", cur_params->allocation_size * data_type_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
}
}
else if (layer_type[i] == SOFTMAX) {
// std::cout << "compute here\n";
SoftmaxLayerParams *cur_params = (SoftmaxLayerParams *)params[i];
continue;
}
if (layer_type[i] == CONV) {
checkCNMEMSim(cnmemFree(cur_workspace, NULL),
cur_workspace_size, max_consume, free_bytes, break, out_of_memory);
fprintf(cnmem_memory_state_fptr, "after free conv. workspace - size: %lu\n", cur_workspace_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
if (!pre_alloc_conv_derivative) {
ConvLayerParams *cur_params = (ConvLayerParams *)params[i];
cur_params->cnmemFreeDerivatives(NULL);
fprintf(cnmem_memory_state_fptr, "after free dP - size: %lu\n", (long unsigned)0);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
}
}
else if (layer_type[i] == FULLY_CONNECTED) {
if (!pre_alloc_fc_derivative) {
FCLayerParams *cur_params = (FCLayerParams *)params[i];
cur_params->cnmemFreeDerivatives(NULL);
fprintf(cnmem_memory_state_fptr, "after free dP - size: %lu\n", (long unsigned)0);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
}
}
else if (layer_type[i] == BATCHNORM) {
if (!pre_alloc_batch_norm_derivative) {
BatchNormLayerParams *cur_params = (BatchNormLayerParams *)params[i];
cur_params->cnmemFreeDerivatives(NULL);
fprintf(cnmem_memory_state_fptr, "after free dP - size: %lu\n", (long unsigned)0);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
}
}
checkCNMEMSim(cnmemFree(layer_input[i + 1], NULL),
layer_input_size[i + 1] * data_type_size, max_consume, free_bytes, break, out_of_memory);
fprintf(cnmem_memory_state_fptr, "after free layer_input[%d] - size: %lu\n", i + 1, layer_input_size[i + 1] * data_type_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
checkCNMEMSim(cnmemFree(dlayer_input[i + 1], NULL),
layer_input_size[i + 1] * data_type_size, max_consume, free_bytes, break, out_of_memory);
fprintf(cnmem_memory_state_fptr, "after free dlayer_input[%d] - size: %lu\n", i + 1, layer_input_size[i + 1] * data_type_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
if (i == 0) {
checkCNMEMSim(cnmemFree(layer_input[i], NULL),
layer_input_size[i] * data_type_size, max_consume, free_bytes, break, out_of_memory);
fprintf(cnmem_memory_state_fptr, "after free layer_input[%d] - size: %lu\n", i, layer_input_size[i] * data_type_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
}
}
checkCNMEM(cnmemFinalize());
if (out_of_memory) {
if (max_consume < free_bytes)
continue;
else
break;
}
break;
}
free_bytes += 100 * 1024 * 1024;
if (max_consume < free_bytes) {
double exp_size = (init_max_consume + init_free_bytes - free_bytes) / (1.0 * 1024 * 1024);
double act_size = (max_consume + init_free_bytes - free_bytes) / (1.0 * 1024 * 1024);
fprintf(cnmem_memory_state_fptr, "expected_memory_consume: %f MB\n", exp_size);
fprintf(cnmem_memory_state_fptr, "actual_memory_consume: %f MB\n", act_size);
}
else {
fprintf(cnmem_memory_state_fptr, "out of memory\n");
}
fclose(cnmem_memory_state_fptr);
if (max_consume < free_bytes)
return true;
else
return false;
}
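// vDNNOptimize: picks the offload set and convolution-algorithm preference. For vDNN_ALL,
// vDNN_CONV and vDNN_NONE the requested configuration is simulated directly and the run
// aborts if it does not fit. For vDNN_DYN, trainability is first verified with full
// offload and memory-optimal algorithms, then progressively cheaper configurations are
// tried: performance-optimal algorithms with no/conv/all offload, the same with a soft
// algorithm preference, and finally memory-optimal algorithms with conv/all offload.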
void NeuralNet::vDNNOptimize(size_t &exp_max_consume, size_t &max_consume) {
bool hard = true, soft = false;
// if type is vDNN_ALL or vDNN_CONV, check if sufficient space is available
if (vdnn_type == vDNN_ALL) {
setOffload(OFFLOAD_ALL);
resetPrefetched();
if (vdnn_conv_algo == vDNN_PERFORMANCE_OPTIMAL) {
if (!simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, hard, exp_max_consume, max_consume))
outOfMemory();
}
else if (vdnn_conv_algo == vDNN_MEMORY_OPTIMAL) {
if (!simulateNeuralNetworkMemory(PREFER_MEMORY_OPTIMAL, hard, exp_max_consume, max_consume))
outOfMemory();
}
return;
}
else if (vdnn_type == vDNN_CONV) {
setOffload(OFFLOAD_CONV);
resetPrefetched();
if (vdnn_conv_algo == vDNN_PERFORMANCE_OPTIMAL) {
if (!simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, hard, exp_max_consume, max_consume))
outOfMemory();
}
else if (vdnn_conv_algo == vDNN_MEMORY_OPTIMAL) {
if (!simulateNeuralNetworkMemory(PREFER_MEMORY_OPTIMAL, hard, exp_max_consume, max_consume))
outOfMemory();
}
return;
}
else if (vdnn_type == vDNN_NONE) {
setOffload(OFFLOAD_NONE);
resetPrefetched();
if (vdnn_conv_algo == vDNN_PERFORMANCE_OPTIMAL) {
if (!simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, hard, exp_max_consume, max_consume))
outOfMemory();
}
else if (vdnn_conv_algo == vDNN_MEMORY_OPTIMAL) {
if (!simulateNeuralNetworkMemory(PREFER_MEMORY_OPTIMAL, hard, exp_max_consume, max_consume))
outOfMemory();
}
return;
}
if (vdnn_type == vDNN_DYN) {
// check for trainability
std::cerr << "vDNN_DYN\n";
setOffload(NeuralNet::OFFLOAD_ALL);
resetPrefetched();
if(!simulateNeuralNetworkMemory(PREFER_MEMORY_OPTIMAL, hard, exp_max_consume, max_consume))
outOfMemory();
// check if it works with the fastest algo and no offload; if so, select it and return
setOffload(NeuralNet::OFFLOAD_NONE);
resetPrefetched();
if (simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, hard, exp_max_consume, max_consume)) {
std::cerr << "Choosing PERF_OPT, NO OFFLOAD\n";
return;
}
// check if conv offload and fastest algo works, then check if all offload and fastest algo works
setOffload(NeuralNet::OFFLOAD_CONV);
resetPrefetched();
if (simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, hard, exp_max_consume, max_consume)) {
std::cerr << "Choosing PERF_OPT, CONV OFFLOAD\n";
return;
}
setOffload(NeuralNet::OFFLOAD_ALL);
resetPrefetched();
if (simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, hard, exp_max_consume, max_consume)) {
std::cerr << "Choosing PERF_OPT, ALL OFFLOAD\n";
return;
}
// greedy optimization: prefer performance-optimal algorithms but allow fallback (soft preference) so memory usage still fits
setOffload(NeuralNet::OFFLOAD_CONV);
resetPrefetched();
if (simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, soft, exp_max_consume, max_consume)) {
std::cerr << "Choosing GREEDY, CONV OFFLOAD\n";
return;
}
setOffload(NeuralNet::OFFLOAD_ALL);
resetPrefetched();
if (simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, soft, exp_max_consume, max_consume)) {
std::cerr << "Choosing GREEDY, ALL OFFLOAD\n";
return;
}
setOffload(NeuralNet::OFFLOAD_CONV);
resetPrefetched();
if (simulateNeuralNetworkMemory(PREFER_MEMORY_OPTIMAL, hard, exp_max_consume, max_consume)) {
std::cerr << "Choosing MEM_OPT, CONV OFFLOAD\n";
return;
}
setOffload(NeuralNet::OFFLOAD_ALL);
resetPrefetched();
if(simulateNeuralNetworkMemory(PREFER_MEMORY_OPTIMAL, hard, exp_max_consume, max_consume)) {
std::cerr << "Choosing MEM_OPT, ALL OFFLOAD\n";
return;
}
}
exit(0);
}
void NeuralNet::setOffload(NeuralNet::OffloadType offload_type) {
if (offload_type == OFFLOAD_NONE) {
for (int i = 0; i < num_layers; i++)
to_offload[i] = false;
}
else if (offload_type == OFFLOAD_CONV) {
for (int i = 0; i < num_layers; i++) {
if (layer_type[i] == CONV)
to_offload[i] = true;
else
to_offload[i] = false;
}
// set last non SOFTMAX/ACTV layer to no_offload
for (int i = num_layers - 1; i >= 0; i--) {
if (layer_type[i] == SOFTMAX or layer_type[i] == ACTV)
;
else {
to_offload[i] = false;
break;
}
}
}
else if (offload_type == OFFLOAD_ALL) {
for (int i = 0; i < num_layers; i++) {
if (layer_type[i] == ACTV or layer_type[i] == SOFTMAX)
to_offload[i] = false;
else
to_offload[i] = true;
}
// set last non SOFTMAX/ACTV layer to no_offload
for (int i = num_layers - 1; i >= 0; i--) {
if (layer_type[i] == SOFTMAX or layer_type[i] == ACTV)
;
else {
to_offload[i] = false;
break;
}
}
}
}
void NeuralNet::resetPrefetched() {
for (int i = 0; i < num_layers; i++)
prefetched[i] = false;
}
void NeuralNet::getLoss(void *X, int *y, double learning_rate, bool train, int *correct_count, float *loss) {
std::vector<float> t1, t2;
this->getLoss(X, y, learning_rate, t1, t2, train, correct_count, loss);
}
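// getLoss: runs one training (or inference) step. The forward pass asynchronously offloads
// each to_offload[] layer input to pinned host memory on stream_memory while computing on
// stream_compute, recording the stall at each per-layer synchronization in fwd_vdnn_lag.
// After the softmax loss, the backward pass prefetches offloaded inputs back just in time,
// allocates derivatives and workspaces through CNMeM, and updates parameters in place via
// stepParams().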
void NeuralNet::getLoss(void *X, int *y, double learning_rate, std::vector<float> &fwd_vdnn_lag, std::vector<float> &bwd_vdnn_lag, bool train, int *correct_count, float *scalar_loss) {
CnmemSpace space_tracker(free_bytes);
// std::cout << "here\n";
// std::cout << "Free bytes: " << free_bytes << std::endl;
for (int i = 0; i < num_layers; i++)
prefetched[i] = false;
checkCNMEM(cnmemMalloc(&layer_input[0], layer_input_size[0] * data_type_size, NULL));
space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[0] * data_type_size);
checkCudaErrors(cudaMemcpy(layer_input[0], X, batch_size * input_channels * input_h * input_w * data_type_size, cudaMemcpyHostToDevice));
if (train == true) {
checkCudaErrors(cudaMemcpy(this->y, y, batch_size * sizeof(int), cudaMemcpyHostToDevice));
}
float alpha = 1.0, beta = 0.0;
float Salpha = 1.0, Sbeta = 0.0;
double Dalpha = 1.0, Dbeta = 0.0;
// forward propagate
for (int i = 0; i < num_layers; i++) {
if (train == false && i == num_layers - 1)
break;
// ---------------------- vDNN start ----------------------
size_t cur_workspace_size;
void *cur_workspace;
// offload if required
if (i > 0 && to_offload[i] && train == true)
checkCudaErrors(cudaMemcpyAsync(h_layer_input[i], layer_input[i],
layer_input_size[i] * data_type_size, cudaMemcpyDeviceToHost, stream_memory));
checkCNMEM(cnmemMalloc(&layer_input[i + 1], layer_input_size[i + 1] * data_type_size, NULL));
space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[i + 1] * data_type_size);
// std::cout << "Free bytes: " << free_bytes << std::endl;
// ---------------------- vDNN end ------------------------
// std::cout << "here" << i << std::endl;
if (layer_type[i] == CONV) {
// std::cout << "conv\n";
ConvLayerParams *cur_params = (ConvLayerParams *)params[i];
cur_workspace_size = cur_params->fwd_workspace_size;
checkCNMEM(cnmemMalloc(&cur_workspace, cur_workspace_size, NULL));
// computation
checkCUDNN(cudnnConvolutionForward(cudnn_handle, &alpha,
cur_params->input_tensor, layer_input[i],
cur_params->filter_desc, cur_params->W,
cur_params->conv_desc, cur_params->fwd_algo,
cur_workspace, cur_workspace_size,
&beta,
cur_params->output_tensor, layer_input[i + 1]));
checkCUDNN(cudnnAddTensor(cudnn_handle, &alpha,
cur_params->bias_desc, cur_params->b,
&alpha,
cur_params->output_tensor, layer_input[i + 1]));
// if activation required
if (cur_params->activation_mode != ACTIVATION_NONE) {
checkCUDNN(cudnnActivationForward(cudnn_handle, cur_params->actv_desc,
&alpha,
cur_params->output_tensor, layer_input[i + 1],
&beta,
cur_params->output_tensor, layer_input[i + 1]));
}
space_tracker.updateSpace(CnmemSpace::SUB, cur_workspace_size);
// std::cout << "Free bytes: " << free_bytes << std::endl;
}
else if (layer_type[i] == FULLY_CONNECTED) {
// std::cout << "FC\n";
FCLayerParams *cur_params = (FCLayerParams *)params[i];
// std::cout << "FChere" << i << std::endl;
if (data_type == CUDNN_DATA_FLOAT) {
checkCUBLAS(cublasSgemm(cublas_handle,
CUBLAS_OP_N, CUBLAS_OP_N,
cur_params->C_out, batch_size, cur_params->C_in,
&Salpha,
(float *)cur_params->W, cur_params->C_out,
(float *)layer_input[i], cur_params->C_in,
&Sbeta,
(float *)layer_input[i + 1], cur_params->C_out));
checkCUBLAS(cublasSgemm(cublas_handle,
CUBLAS_OP_N, CUBLAS_OP_N,
cur_params->C_out, batch_size, 1,
&Salpha,
(float *)cur_params->b, cur_params->C_out,
(float *)one_vec, 1,
&Salpha,
(float *)layer_input[i + 1], cur_params->C_out));
}
else if (data_type == CUDNN_DATA_DOUBLE) {
checkCUBLAS(cublasDgemm(cublas_handle,
CUBLAS_OP_N, CUBLAS_OP_N,
cur_params->C_out, batch_size, cur_params->C_in,
&Dalpha,
(double *)cur_params->W, cur_params->C_out,
(double *)layer_input[i], cur_params->C_in,
&Dbeta,
(double *)layer_input[i + 1], cur_params->C_out));
checkCUBLAS(cublasDgemm(cublas_handle,
CUBLAS_OP_N, CUBLAS_OP_N,
cur_params->C_out, batch_size, 1,
&Dalpha,
(double *)cur_params->b, cur_params->C_out,
(double *)one_vec, 1,
&Dalpha,
(double *)layer_input[i + 1], cur_params->C_out));
}
if (cur_params->activation_mode != ACTIVATION_NONE) {
checkCUDNN(cudnnActivationForward(cudnn_handle, cur_params->actv_desc,
&alpha,
cur_params->output_tensor, layer_input[i + 1],
&beta,
cur_params->output_tensor, layer_input[i + 1]));
}
// std::cout << "FChere" << i << std::endl;
}
else if (layer_type[i] == DROPOUT) {
// std::cout << "Dropout\n";
DropoutLayerParams *cur_params = (DropoutLayerParams *)params[i];
checkCUDNN(cudnnDropoutForward(cudnn_handle, cur_params->dropout_desc,
cur_params->input_tensor, layer_input[i],
cur_params->input_tensor, layer_input[i + 1],
cur_params->reserved_space,
cur_params->reserved_space_size));
}
else if (layer_type[i] == BATCHNORM) {
// std::cout << "Batchnorm\n";
BatchNormLayerParams *cur_params = (BatchNormLayerParams *)params[i];
if (train == true) {
checkCUDNN(cudnnBatchNormalizationForwardTraining(cudnn_handle, cur_params->mode,
&alpha, &beta,
cur_params->input_tensor, layer_input[i],
cur_params->input_tensor, layer_input[i + 1],
cur_params->sbmv_desc,
cur_params->scale, cur_params->bias,
cur_params->factor,
cur_params->running_mean, cur_params->running_variance,
cur_params->epsilon,
cur_params->result_save_mean, cur_params->result_save_inv_var));
}
else {
checkCUDNN(cudnnBatchNormalizationForwardInference(cudnn_handle, cur_params->mode,
&alpha, &beta,
cur_params->input_tensor, layer_input[i],
cur_params->input_tensor, layer_input[i + 1],
cur_params->sbmv_desc,
cur_params->scale, cur_params->bias,
cur_params->running_mean, cur_params->running_variance,
cur_params->epsilon));
}
}
else if (layer_type[i] == POOLING) {
// std::cout << "Pooling\n";
PoolingLayerParams *cur_params = (PoolingLayerParams *)params[i];
checkCUDNN(cudnnPoolingForward(cudnn_handle, cur_params->pool_desc,
&alpha,
cur_params->input_tensor, layer_input[i],
&beta,
cur_params->output_tensor, layer_input[i + 1]));
}
else if (layer_type[i] == ACTV) {
// std::cout << "Actv\n";
std::cout << "Panic!! ACTV wrong place\n";
exit(0);
ActivationLayerParams *cur_params = (ActivationLayerParams *)params[i];
checkCUDNN(cudnnActivationForward(cudnn_handle, cur_params->actv_desc,
&alpha,
cur_params->input_tensor, layer_input[i],
&beta,
cur_params->input_tensor, layer_input[i + 1]));
}
else if (layer_type[i] == SOFTMAX) {
// std::cout << "Softmax\n";
std::cout << "Panic!! SOFTMAX wrong place\n";
exit(0);
if (train == true) {
SoftmaxLayerParams *cur_params = (SoftmaxLayerParams *)params[i];
checkCUDNN(cudnnSoftmaxForward(cudnn_handle, cur_params->algo, cur_params->mode,
&alpha,
cur_params->input_tensor, layer_input[i],
&beta,
cur_params->input_tensor, layer_input[i + 1]));
}
}
// ---------------------- vDNN start ----------------------
// synchronization
// checkCudaErrors(cudaDeviceSynchronize());
// if next layer is ACTV or SOFTMAX, complete that and come to synchronization
// the case in above if for ACTV and SOFTMAX never occurs
if (layer_type[i + 1] == SOFTMAX) {
i++;
if (train == true) {
layer_input[i + 1] = layer_input[i];
SoftmaxLayerParams *cur_params = (SoftmaxLayerParams *)params[i];
checkCUDNN(cudnnSoftmaxForward(cudnn_handle, cur_params->algo, cur_params->mode,
&alpha,
cur_params->input_tensor, layer_input[i],
&beta,
cur_params->input_tensor, layer_input[i + 1]));
}
i--;
}
struct timespec start_time, end_time;
checkCudaErrors(cudaStreamSynchronize(stream_compute));
if (train)
clock_gettime(CLOCK_MONOTONIC, &start_time);
checkCudaErrors(cudaStreamSynchronize(stream_memory));
if (train) {
clock_gettime(CLOCK_MONOTONIC, &end_time);
float lag = (end_time.tv_sec - start_time.tv_sec) * 1e3 + (end_time.tv_nsec - start_time.tv_nsec) * 1e-6;
fwd_vdnn_lag.push_back(lag);
}
// std::cout << "EndSynchere" << i << std::endl;
if (layer_type[i] == CONV) {
checkCNMEM(cnmemFree(cur_workspace, NULL));
space_tracker.updateSpace(CnmemSpace::ADD, cur_workspace_size);
}
if (to_offload[i] && train == true) {
checkCNMEM(cnmemFree(layer_input[i], NULL));
space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[i] * data_type_size);
}
if (train == false) {
checkCNMEM(cnmemFree(layer_input[i], NULL));
space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[i] * data_type_size);
}
if (layer_type[i + 1] == ACTV or layer_type[i + 1] == SOFTMAX) {
i = i + 1;
}
// std::cout << "EndSynchere" << i << std::endl;
// ---------------------- vDNN end ------------------------
}
// std::cout << "here" << std::endl;
if (train == false) {
compareOutputCorrect(correct_count, y);
checkCNMEM(cnmemFree(layer_input[num_layers - 1], NULL));
space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[num_layers - 1] * data_type_size);
return;
}
*scalar_loss = computeLoss();
// ---------------------- vDNN start ----------------------
checkCNMEM(cnmemMalloc(&dlayer_input[num_layers], batch_size * num_classes * data_type_size, NULL));
space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[num_layers] * data_type_size);
// std::cout << "Free bytes: " << free_bytes << std::endl;
// ---------------------- vDNN end ------------------------
if (layer_type[num_layers - 1] == SOFTMAX) {
// SoftmaxLayerParams *cur_params = (SoftmaxLayerParams *)params[num_layers - 1];
if (data_type == CUDNN_DATA_FLOAT) {
checkCudaErrors(cudaMemset(dlayer_input[num_layers], 0, batch_size * num_classes * sizeof(float)));
softmaxLossBackProp<float><<<ceil(1.0 * batch_size / BW), BW>>>(this->y, (float *)layer_input[num_layers],
(float *)dlayer_input[num_layers], batch_size, num_classes, softmax_eps);
}
else if (data_type == CUDNN_DATA_DOUBLE) {
checkCudaErrors(cudaMemset(dlayer_input[num_layers], 0, batch_size * num_classes * sizeof(double)));
softmaxLossBackProp<double><<<ceil(1.0 * batch_size / BW), BW>>>(this->y, (double *)layer_input[num_layers],
(double *)dlayer_input[num_layers], batch_size, num_classes, softmax_eps);
}
}
for (int i = num_layers - 1; i >= 0; i--) {
// ---------------------- vDNN start ----------------------
size_t cur_filter_workspace_size, cur_data_workspace_size, cur_workspace_size;
void *cur_workspace;
if (i > 0) {
if (layer_type[i] == ACTV or layer_type[i] == SOFTMAX) {
dlayer_input[i] = dlayer_input[i + 1];
}
else {
int layer_to_prefetch = findPrefetchLayer(i);
if (layer_to_prefetch != -1) {
checkCNMEM(cnmemMalloc(&layer_input[layer_to_prefetch], layer_input_size[layer_to_prefetch] * data_type_size, NULL));
space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[layer_to_prefetch] * data_type_size);
// std::cout << "Free bytes: " << free_bytes << std::endl;
if (layer_to_prefetch != 0) {
checkCudaErrors(cudaMemcpyAsync(layer_input[layer_to_prefetch], h_layer_input[layer_to_prefetch],
layer_input_size[layer_to_prefetch] * data_type_size, cudaMemcpyHostToDevice, stream_memory));
}
else {
// std::cout << "transfer here\n";
checkCudaErrors(cudaMemcpyAsync(layer_input[layer_to_prefetch], X,
layer_input_size[layer_to_prefetch] * data_type_size, cudaMemcpyHostToDevice, stream_memory));
// std::cout << "transfer here\n";
}
}
checkCNMEM(cnmemMalloc(&dlayer_input[i], layer_input_size[i] * data_type_size, NULL));
space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[i] * data_type_size);
}
// std::cout << "Free bytes: " << free_bytes << std::endl;
}
// ---------------------- vDNN end ------------------------
if (layer_type[i] == CONV) {
// std::cout << "here\n";
ConvLayerParams *cur_params = (ConvLayerParams *)params[i];
if (cur_params->activation_mode != ACTIVATION_NONE) {
checkCUDNN(cudnnActivationBackward(cudnn_handle, cur_params->actv_desc, &alpha,
cur_params->output_tensor, layer_input[i + 1],
cur_params->output_tensor, dlayer_input[i + 1],
cur_params->output_tensor, layer_input[i + 1],
&beta,
cur_params->output_tensor, dlayer_input[i + 1]));
}
// allocate space for derivative
if (!pre_alloc_conv_derivative) {
cur_params->cnmemAllocDerivatives(data_type_size, NULL);
space_tracker.updateSpace(CnmemSpace::SUB, cur_params->kernel_size * data_type_size);
space_tracker.updateSpace(CnmemSpace::SUB, cur_params->C_out * data_type_size);
}
cur_filter_workspace_size = cur_params->bwd_filter_workspace_size;
if (i > 0)
cur_data_workspace_size = cur_params->bwd_data_workspace_size;
else
cur_data_workspace_size = 0;
// std::cout << "bwd cur_workspace_size: " << cur_workspace_size << std::endl;
cur_workspace_size = (cur_filter_workspace_size > cur_data_workspace_size) ? cur_filter_workspace_size : cur_data_workspace_size;
checkCNMEM(cnmemMalloc(&cur_workspace, cur_workspace_size, NULL));
checkCUDNN(cudnnConvolutionBackwardBias(cudnn_handle, &alpha,
cur_params->output_tensor, dlayer_input[i + 1],
&beta,
cur_params->bias_desc, cur_params->db));
// std::cout << "neural_net: backward conv i:" << i << std::endl;
checkCUDNN(cudnnConvolutionBackwardFilter(cudnn_handle, &alpha,
cur_params->input_tensor, layer_input[i],
cur_params->output_tensor, dlayer_input[i + 1],
cur_params->conv_desc, cur_params->bwd_filter_algo,
cur_workspace, cur_workspace_size,
&beta,
cur_params->filter_desc,
cur_params->dW));
if (i > 0)
checkCUDNN(cudnnConvolutionBackwardData(cudnn_handle, &alpha,
cur_params->filter_desc, cur_params->W,
cur_params->output_tensor, dlayer_input[i + 1],
cur_params->conv_desc, cur_params->bwd_data_algo,
cur_workspace, cur_workspace_size,
&beta,
cur_params->input_tensor, dlayer_input[i]));
space_tracker.updateSpace(CnmemSpace::SUB, cur_workspace_size);
// std::cout << "Free bytes: " << free_bytes << std::endl;
// std::cout << "here\n";
cur_params->stepParams(cublas_handle, learning_rate);
}
else if (layer_type[i] == FULLY_CONNECTED) {
FCLayerParams *cur_params = (FCLayerParams *)params[i];
if (cur_params->activation_mode != ACTIVATION_NONE) {
checkCUDNN(cudnnActivationBackward(cudnn_handle, cur_params->actv_desc, &alpha,
cur_params->output_tensor, layer_input[i + 1],
cur_params->output_tensor, dlayer_input[i + 1],
cur_params->output_tensor, layer_input[i + 1],
&beta,
cur_params->output_tensor, dlayer_input[i + 1]));
}
if (!pre_alloc_fc_derivative) {
cur_params->cnmemAllocDerivatives(data_type_size, NULL);
space_tracker.updateSpace(CnmemSpace::SUB, cur_params->weight_matrix_size * data_type_size);
space_tracker.updateSpace(CnmemSpace::SUB, cur_params->C_out * data_type_size);
}
if (data_type == CUDNN_DATA_FLOAT) {
// bias backward
checkCUBLAS(cublasSgemm(cublas_handle,
CUBLAS_OP_N, CUBLAS_OP_N,
cur_params->C_out, 1, batch_size,
&Salpha,
(float *)dlayer_input[i + 1], cur_params->C_out,
(float *)one_vec, batch_size,
&Sbeta,
(float *)cur_params->db, cur_params->C_out));
// weight backward
checkCUBLAS(cublasSgemm(cublas_handle,
CUBLAS_OP_N, CUBLAS_OP_T,
cur_params->C_out, cur_params->C_in, batch_size,
&Salpha,
(float *)dlayer_input[i + 1], cur_params->C_out,
(float *)layer_input[i], cur_params->C_in,
&Sbeta,
(float *)cur_params->dW, cur_params->C_out));
// data backward
if (i > 0)
checkCUBLAS(cublasSgemm(cublas_handle,
CUBLAS_OP_T, CUBLAS_OP_N,
cur_params->C_in, batch_size, cur_params->C_out,
&Salpha,
(float *)cur_params->W, cur_params->C_out,
(float *)dlayer_input[i + 1], cur_params->C_out,
&Sbeta,
(float *)dlayer_input[i], cur_params->C_in));
}
else if (data_type == CUDNN_DATA_DOUBLE) {
// bias backward
checkCUBLAS(cublasDgemm(cublas_handle,
CUBLAS_OP_N, CUBLAS_OP_N,
cur_params->C_out, 1, batch_size,
&Dalpha,
(double *)dlayer_input[i + 1], cur_params->C_out,
(double *)one_vec, batch_size,
&Dbeta,
(double *)cur_params->db, cur_params->C_out));
// weight backward
checkCUBLAS(cublasDgemm(cublas_handle,
CUBLAS_OP_N, CUBLAS_OP_T,
cur_params->C_out, cur_params->C_in, batch_size,
&Dalpha,
(double *)dlayer_input[i + 1], cur_params->C_out,
(double *)layer_input[i], cur_params->C_in,
&Dbeta,
(double *)cur_params->dW, cur_params->C_out));
// data backward
if (i > 0)
checkCUBLAS(cublasDgemm(cublas_handle,
CUBLAS_OP_T, CUBLAS_OP_N,
cur_params->C_in, batch_size, cur_params->C_out,
&Dalpha,
(double *)cur_params->W, cur_params->C_out,
(double *)dlayer_input[i + 1], cur_params->C_out,
&Dbeta,
(double *)dlayer_input[i], cur_params->C_in));
}
cur_params->stepParams(cublas_handle, learning_rate);
}
else if (layer_type[i] == DROPOUT) {
DropoutLayerParams *cur_params = (DropoutLayerParams *)params[i];
checkCUDNN(cudnnDropoutBackward(cudnn_handle, cur_params->dropout_desc,
cur_params->input_tensor, dlayer_input[i + 1],
cur_params->input_tensor, dlayer_input[i],
cur_params->reserved_space, cur_params->reserved_space_size));
}
else if (layer_type[i] == BATCHNORM) {
BatchNormLayerParams *cur_params = (BatchNormLayerParams *)params[i];
if (!pre_alloc_batch_norm_derivative) {
cur_params->cnmemAllocDerivatives(data_type_size, NULL);
space_tracker.updateSpace(CnmemSpace::SUB, cur_params->allocation_size * data_type_size);
space_tracker.updateSpace(CnmemSpace::SUB, cur_params->allocation_size * data_type_size);
}
checkCUDNN(cudnnBatchNormalizationBackward(cudnn_handle, cur_params->mode,
&alpha, &beta,
&alpha, &beta,
cur_params->input_tensor, layer_input[i],
cur_params->input_tensor, dlayer_input[i + 1],
cur_params->input_tensor, dlayer_input[i],
cur_params->sbmv_desc, cur_params->scale,
cur_params->dscale, cur_params->dbias,
cur_params->epsilon,
cur_params->result_save_mean, cur_params->result_save_inv_var));
cur_params->stepParams(cublas_handle, learning_rate);
}
else if (layer_type[i] == POOLING) {
PoolingLayerParams *cur_params = (PoolingLayerParams *)params[i];
checkCUDNN(cudnnPoolingBackward(cudnn_handle, cur_params->pool_desc, &alpha,
cur_params->output_tensor, layer_input[i + 1],
cur_params->output_tensor, dlayer_input[i + 1],
cur_params->input_tensor, layer_input[i],
&beta,
cur_params->input_tensor, dlayer_input[i]));
}
else if (layer_type[i] == ACTV) {
ActivationLayerParams *cur_params = (ActivationLayerParams *)params[i];
checkCUDNN(cudnnActivationBackward(cudnn_handle, cur_params->actv_desc, &alpha,
cur_params->input_tensor, layer_input[i + 1],
cur_params->input_tensor, dlayer_input[i + 1],
cur_params->input_tensor, layer_input[i],
&beta,
cur_params->input_tensor, dlayer_input[i]));
continue;
}
else if (layer_type[i] == SOFTMAX) {
// std::cout << "compute here\n";
SoftmaxLayerParams *cur_params = (SoftmaxLayerParams *)params[i];
checkCUDNN(cudnnSoftmaxBackward(cudnn_handle, cur_params->algo, cur_params->mode, &alpha,
cur_params->input_tensor, layer_input[i + 1],
cur_params->input_tensor, dlayer_input[i + 1],
&beta,
cur_params->input_tensor, dlayer_input[i]));
// std::cout << "compute here\n";
continue;
}
// ---------------------- vDNN start ----------------------
// checkCudaErrors(cudaDeviceSynchronize());
struct timespec start_time, end_time;
checkCudaErrors(cudaStreamSynchronize(stream_compute));
if (train)
clock_gettime(CLOCK_MONOTONIC, &start_time);
checkCudaErrors(cudaStreamSynchronize(stream_memory));
if (train) {
clock_gettime(CLOCK_MONOTONIC, &end_time);
float lag = (end_time.tv_sec - start_time.tv_sec) * 1e3 + (end_time.tv_nsec - start_time.tv_nsec) * 1e-6;
bwd_vdnn_lag.insert(bwd_vdnn_lag.begin(), lag);
}
if (layer_type[i] == CONV) {
checkCNMEM(cnmemFree(cur_workspace, NULL));
space_tracker.updateSpace(CnmemSpace::ADD, cur_workspace_size);
if (!pre_alloc_conv_derivative) {
ConvLayerParams *cur_params = (ConvLayerParams *)params[i];
cur_params->cnmemFreeDerivatives(NULL);
space_tracker.updateSpace(CnmemSpace::ADD, cur_params->kernel_size * data_type_size);
space_tracker.updateSpace(CnmemSpace::ADD, cur_params->C_out * data_type_size);
}
}
else if (layer_type[i] == FULLY_CONNECTED) {
if (!pre_alloc_fc_derivative) {
FCLayerParams *cur_params = (FCLayerParams *)params[i];
cur_params->cnmemFreeDerivatives(NULL);
space_tracker.updateSpace(CnmemSpace::ADD, cur_params->weight_matrix_size * data_type_size);
space_tracker.updateSpace(CnmemSpace::ADD, cur_params->C_out * data_type_size);
}
}
else if (layer_type[i] == BATCHNORM) {
if (train == true and !pre_alloc_batch_norm_derivative) {
BatchNormLayerParams *cur_params = (BatchNormLayerParams *)params[i];
cur_params->cnmemFreeDerivatives(NULL);
space_tracker.updateSpace(CnmemSpace::ADD, cur_params->allocation_size * data_type_size);
space_tracker.updateSpace(CnmemSpace::ADD, cur_params->allocation_size * data_type_size);
}
}
checkCNMEM(cnmemFree(layer_input[i + 1], NULL));
space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[i + 1] * data_type_size);
checkCNMEM(cnmemFree(dlayer_input[i + 1], NULL));
space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[i + 1] * data_type_size);
if (i == 0) {
checkCNMEM(cnmemFree(layer_input[i], NULL));
space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[i] * data_type_size);
}
// ---------------------- vDNN end ------------------------
}
if (space_tracker.getConsumed() != 0) {
std::cout << "Panic!! Space not updated properly\n";
}
// exit(0);
}
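// findPrefetchLayer: returns the nearest earlier layer that is offloaded but not yet
// prefetched (marking it as prefetched), stopping the search at the first CONV layer
// that does not itself qualify; returns -1 if there is nothing to prefetch.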
int NeuralNet::findPrefetchLayer(int cur_layer) {
for (int i = cur_layer - 1; i >= 0; i--) {
if (to_offload[i] && !prefetched[i]) {
prefetched[i] = true;
return i;
}
else if (layer_type[i] == CONV) {
return -1;
}
}
return -1;
}
|
bdb549c7b5896eb0a9a6d91b967748d0171ed8d5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void rgb2yuvKernel(int *imgr,int *imgg,int *imgb,int *imgy,int *imgcb,int *imgcr, int n) {
int r, g, b;
int y, cb, cr;
int index;
index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < n){
r = imgr[index];
g = imgg[index];
b = imgb[index];
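// RGB -> analog YUV: BT.601 luma plus scaled colour differences, truncated to int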
y = (int)( 0.299*r + 0.587*g + 0.114*b);
cb = (int)(-0.147*r - 0.289*g + 0.436*b);
cr = (int)( 0.615*r - 0.515*g - 0.1*b);
imgy[index] = y;
imgcb[index] = cb;
imgcr[index] = cr;
}
} | bdb549c7b5896eb0a9a6d91b967748d0171ed8d5.cu | #include "includes.h"
__global__ void rgb2yuvKernel(int *imgr,int *imgg,int *imgb,int *imgy,int *imgcb,int *imgcr, int n) {
int r, g, b;
int y, cb, cr;
int index;
index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < n){
r = imgr[index];
g = imgg[index];
b = imgb[index];
y = (int)( 0.299*r + 0.587*g + 0.114*b);
cb = (int)(-0.147*r - 0.289*g + 0.436*b);
cr = (int)( 0.615*r - 0.515*g - 0.1*b);
imgy[index] = y;
imgcb[index] = cb;
imgcr[index] = cr;
}
} |
b083fc96b046600d19ebab7d05a813eda8b30597.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ----
// ---- Computes the potential field for a volume
// ---- Input: volume file, dimensions: X, Y, Z, output file name
// ---- Output: normalized potential field:
// 1 vector for each point in the volume
//
// Last change: Thu May 15 15:20:38 EDT 2003 by Nicu D. Cornea
//
//
// #define TRACE
#include "potVect.h"
#include <thrust/sort.h>
#define BOUND_SIZE 1200000
struct compareStruct {
__host__ __device__
bool operator()(VoxelPosition a, VoxelPosition b) {
if(a.z != b.z)
return a.z < b.z;
else if(a.y != b.y)
return a.y < b.y;
else
return a.x < b.x;
}
};
bool SortBoundaryArray(int numBound, VoxelPosition Bound[]);
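// A minimal sketch of how SortBoundaryArray could be implemented with the thrust header
// included above (an assumption for illustration; the actual definition lives elsewhere
// in this project):
//
// bool SortBoundaryArray(int numBound, VoxelPosition Bound[]) {
// // sort boundary voxels by (z, y, x) so the kernels can window their scans
// thrust::sort(Bound, Bound + numBound, compareStruct());
// return true;
// }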
/*
this kernel normalizes the force field by calculating the multiplication of magnitudes of force in
x,y,z direction and dividing it by the final multiplication.
*/
__global__ void normalize_vector(Vector* force,unsigned char* f, bool inOut,int slsz,int L)
{
int k=blockIdx.x;
int j=threadIdx.x;
int i=blockIdx.y;
int idx=k*slsz + j*L + i;
if(!inOut) {
		// forces were computed only for interior voxels, so skip exterior ones
if(f[idx] == EXTERIOR) return;
}
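	// r holds the squared Euclidean length of the force vector; normalize only when it is non-zero.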
float r = force[idx].xd*force[idx].xd +
force[idx].yd*force[idx].yd +
force[idx].zd*force[idx].zd;
if(r > 0.00) {
r = sqrtf(r);
force[idx].xd = force[idx].xd / r;
force[idx].yd = force[idx].yd / r;
force[idx].zd = force[idx].zd / r;
}
}
/*
This kernel computes the potential field at every interior voxel, using the
boundary voxels as point charges.
*/
__global__ void compute_potential_field(VoxelPosition *Bound,Vector* force,int numBound,unsigned char* f,bool inOut,int slsz,int sz,int L, int fieldStrenght)
{
int k=blockIdx.x;
int j=threadIdx.x;
int i=blockIdx.y;
int zStartIndex = 0;
int zEndIndex = numBound- 1;
int s;
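	// Bound[] is sorted by z, then y, then x, so narrow the range of candidate point charges to those
	// within PF_THRESHOLD of this voxel along each axis before the inner accumulation loop.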
for (s = 0; s < numBound; s++) {
if((k - Bound[s].z) <= PF_THRESHOLD) {
zStartIndex = s;
break;
}
}
for (s = numBound-1; s >= zStartIndex; s--) {
if((Bound[s].z - k) <= PF_THRESHOLD) {
zEndIndex = s;
break;
}
}
int yStartIndex = zStartIndex;
int yEndIndex = zEndIndex;
for (s = zStartIndex; s <= zEndIndex; s++) {
if((j - Bound[s].y) <= PF_THRESHOLD) {
yStartIndex = s;
break;
}
}
for (s = zEndIndex; s >= yStartIndex; s--) {
if((Bound[s].y - j) <= PF_THRESHOLD) {
yEndIndex = s;
break;
}
}
int idx=k*slsz + j*L + i;
force[idx].xd = 0.00;
force[idx].yd = 0.00;
force[idx].zd = 0.00;
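	// Nothing to accumulate for exterior voxels (when the interior is known) or for surface/boundary
	// voxels; those get their forces later by averaging interior neighbours in
	// computePotentialFieldForBoundaryVoxels.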
if(!inOut) {
if(f[idx] == 0) {
return;
}
}
if(f[idx] == SURF) return;
if(f[idx] == BOUNDARY) return;
int startIndex = yStartIndex;
int endIndex = yEndIndex;
for (s = yStartIndex; s <= yEndIndex; s++) {
if((i - Bound[s].x) <= PF_THRESHOLD) {
startIndex = s;
break;
}
}
for (s = yEndIndex; s >= startIndex; s--) {
if((Bound[s].x - i) <= PF_THRESHOLD) {
endIndex = s;
break;
}
}
if(endIndex < startIndex) {
startIndex = 0;
endIndex = numBound - 1;
}
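	// Accumulate the contribution of every candidate boundary charge: the vector from the charge to
	// this voxel, divided by r^(fieldStrenght+1).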
for (s = startIndex; s <= endIndex; s++) {
float v1 = i - Bound[s].x;
float v2 = j - Bound[s].y;
float v3 = k - Bound[s].z;
float r, t;
#ifdef EUCLIDEAN_METRIC
// euclidean metric
r = sqrtf(v1*v1 + v2*v2 + v3*v3);
#else
// simpler metric
r = abs(v1) + abs(v2) + abs(v3);
#endif
if(r != 0.00) {
t = 1.00;
for(int p = 0; p <= fieldStrenght; p++) {
t = t * r;
}
r = t;
force[idx].xd+=(v1/r);
force[idx].yd+=(v2/r);
force[idx].zd+=(v3/r);
}
}
}
/*
This kernel computes the potential field at every boundary voxel by averaging the
forces of its neighbours; the 26 neighbour offsets are kept in the shared ng array.
*/
__global__ void computePotentialFieldForBoundaryVoxels(unsigned char* f, Vector* force, int slsz, bool inOut, int L) {
int k=blockIdx.x+1;
int j=threadIdx.x+1;
int i=blockIdx.y+1;
__shared__ int ng[26];
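	// The first thread of each block fills the table of 26 neighbour offsets (6 face, 8 vertex,
	// 12 edge neighbours) in shared memory; all other threads wait at the __syncthreads() below.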
if(threadIdx.x==0)
{
// face neighbors
ng[0] = + slsz + 0 + 0;
ng[1] = - slsz + 0 + 0;
ng[2] = + 0 + L + 0;
ng[3] = + 0 - L + 0;
ng[4] = + 0 + 0 + 1;
ng[5] = + 0 + 0 - 1;
// v-neighbors
ng[6] = - slsz - L - 1;
ng[7] = - slsz - L + 1;
ng[8] = - slsz + L - 1;
ng[9] = - slsz + L + 1;
ng[10] = + slsz - L - 1;
ng[11] = + slsz - L + 1;
ng[12] = + slsz + L - 1;
ng[13] = + slsz + L + 1;
// e-neighbors
ng[14] = + slsz + L + 0;
ng[15] = + slsz - L + 0;
ng[16] = - slsz + L + 0;
ng[17] = - slsz - L + 0;
ng[18] = + slsz + 0 + 1;
ng[19] = + slsz + 0 - 1;
ng[20] = - slsz + 0 + 1;
ng[21] = - slsz + 0 - 1;
ng[22] = + 0 + L + 1;
ng[23] = + 0 + L - 1;
ng[24] = + 0 - L + 1;
ng[25] = + 0 - L - 1;
}
__syncthreads();
long idx = k*slsz + j*L + i;
if((f[idx] == SURF) ||
(f[idx] == BOUNDARY))
{
force[idx].xd = 0.00;
force[idx].yd = 0.00;
force[idx].zd = 0.00;
float var_xd=0.00;
float var_yd=0.00;
float var_zd=0.00;
// look at the neighbors and average the forces if not 0
//
int v1 = 0;
for(int s=0; s < 26; s++) {
long iidx = idx + ng[s];
if(f[iidx] == SURF) continue;
if(f[iidx] == BOUNDARY) continue;
// if we know the interior of the object, take only interior
// neighbors
if(!inOut) {
if(f[iidx] == EXTERIOR) continue;
}
var_xd = var_xd + force[iidx].xd;
var_yd = var_yd + force[iidx].yd;
var_zd = var_zd + force[iidx].zd;
v1 = v1 + 1;
}
// average
if(v1 != 0) {
var_xd = var_xd / (double) v1;
var_yd= var_yd / (double) v1;
var_zd = var_zd / (double) v1;
}
else {
printf("Boundary voxel has no interior neighbor !!! - Force = 0\n");
}
// normalize
float r = var_xd*var_xd +
var_yd*var_yd +
var_zd*var_zd;
if(r > 0.00) {
r = sqrtf(r);
force[idx].xd = var_xd / r;
force[idx].yd = var_yd / r;
force[idx].zd = var_zd/ r;
}
}
}
bool CalculatePotentialField(
int L, int M, int N, // [in] size of volume
unsigned char* f, // [in] volume flags
	int fieldStrenght, // [in] potential field strength
Vector* force, // [out] force field
bool inOut // [in] flag indicating that we don't
// know what the inside/outside of
// the object is. We have only point
// samples of the boundary.
// DEFAULT: false (only interior)
) {
//hipSetDevice(1);
int Lm1, Mm1, Nm1;
int i,j,k, s, p;
long idx, iidx, slsz, sz;
VoxelPosition* Bound;
int numBound = 0;
bool flagSurf, flagBound;
double r, t;
int v1, v2, v3;
int startIndex, tmpStartIndex, endIndex, tmpEndIndex, zStartIndex, zEndIndex, yStartIndex, yEndIndex;
//
// check volume padding - fast version
//
if(!CheckVolumePadding(f, L, M, N)) {
printf("** Error - Object touches bounding box. Abort.\n");
exit(1);
}
#ifdef _DEBUG
printf("\t************ Potential Field calculation parameters: ******************\n");
#ifdef HALF_BOUNDARY_POINTS
printf("\t** Using only HALF of the boundary points.\n");
#else
printf("\t** Using ALL boundary points.\n");
#endif
#ifdef EUCLIDEAN_METRIC
printf("\t** Using EUCLIDEAN metric.\n");
#else
printf("\t** Using NON EUCLIDEAN metric.\n");
#endif
if(inOut) {
printf("\t** Inside and Outside.\n");
}
else {
printf("\t** Inside ONLY.\n");
}
printf("\t********* Potential Field calculation parameters - end ****************\n");
#endif
if((Bound = new VoxelPosition[BOUND_SIZE]) == NULL) {
printf("\nERROR allocating memory for boundary array! - Abort\n");
exit(1);
}
Lm1 = L - 1;
Mm1 = M - 1;
Nm1 = N - 1;
slsz = L*M; // slice size
sz = slsz*N;
// save all the boundary voxels in array Bound[]
for (k = 1; k < Nm1; k++) {
for (j = 1; j < Mm1; j++) {
for (i = 1; i < Lm1; i++) {
flagSurf = false;
flagBound = true;
idx = k*slsz + j*L + i;
// CASE 1: treat the inner layer
if (f[idx] == 0) continue;
	//consider the six face neighbors; if any one of them is zero, this is a boundary voxel
iidx = k*slsz + j*L + i-1;
if (f[iidx] == 0) {
flagSurf = true;
}
#ifdef HALF_BOUNDARY_POINTS
// consider only half of the boundary points
else {
if (f[iidx] == BOUNDARY) {
// a neighbour of the point was already selected so we will not select this one as part of the boundary.
flagBound = false;
}
}
#endif
if(!flagSurf || flagBound) {
iidx = k*slsz + j*L + i+1;
if (f[iidx] == 0) {
flagSurf = true;
}
#ifdef HALF_BOUNDARY_POINTS
// consider only half of the boundary points
else {
if (f[iidx] == BOUNDARY) {
// a neighbour of the point was already selected so we will not select it as part of the boundary.
flagBound = false;
}
}
#endif
if(!flagSurf || flagBound) {
iidx = k*slsz + (j-1)*L + i;
if (f[iidx] == 0) {
flagSurf = true;
}
#ifdef HALF_BOUNDARY_POINTS
// consider only half of the boundary points
else {
if (f[iidx] == BOUNDARY) {
// a neighbour of the point was already selected so we will not select it as part of the boundary.
flagBound = false;
}
}
#endif
if(!flagSurf || flagBound) {
iidx = k*slsz + (j+1)*L + i;
if (f[iidx] == 0) {
flagSurf = true;
}
#ifdef HALF_BOUNDARY_POINTS
// consider only half of the boundary points
else {
if (f[iidx] == BOUNDARY) {
// a neighbour of the point was already selected so we will not select it as part of the boundary.
flagBound = false;
}
}
#endif
if(!flagSurf || flagBound) {
iidx = (k-1)*slsz + j*L + i;
if (f[iidx] == 0) {
flagSurf = true;
}
#ifdef HALF_BOUNDARY_POINTS
// consider only half of the boundary points
else {
if (f[iidx] == BOUNDARY) {
// a neighbour of the point was already selected so we will not select it as part of the boundary.
flagBound = false;
}
}
#endif
if(!flagSurf || flagBound) {
iidx = (k+1)*slsz + j*L + i;
if (f[iidx] == 0) {
flagSurf = true;
}
#ifdef HALF_BOUNDARY_POINTS
// consider only half of the boundary points
else {
if (f[iidx] == BOUNDARY) {
// a neighbour of the point was already selected so we will not select it as part of the boundary.
flagBound = false;
}
}
#endif
}
}
}
}
}
// restore idx to the right value
idx = k*slsz + j*L + i;
if (flagSurf) {
f[idx] = SURF;
if(flagBound) {
// if no neighbour of this voxel is already marked as boundary, then mark this one.
// or if we are taking all the boundary voxels
// (in this case flagBound stays true)
f[idx] = BOUNDARY;
Bound[numBound].x = i;
Bound[numBound].y = j;
Bound[numBound].z = k;
numBound++;
if(numBound >= BOUND_SIZE) {
printf("ERROR: too many boundary points detected !! - Abort.\n");
exit(1);
}
}
}
}
}
}
//printf("numBound = %d \n", numBound);
#ifdef _DEBUG
PrintElapsedTime("\tPF-1: finding the boundary voxels.");
printf("\t--Found %d boundary voxels.\n", numBound);
#endif
// sort the boundary array.
SortBoundaryArray(numBound, Bound);
#ifdef _DEBUG
PrintElapsedTime("\tPF-2: sorting the boundary voxels.");
#ifdef TRACE
// print the boundary voxels
for(i=0; i < numBound; i++) {
printf("%d %d %d 0.5\n", Bound[i].x, Bound[i].y, Bound[i].z);
}
exit(1);
#endif
#endif
// Compute the potential field
printf("Computing potential field.\n");
dim3 dimBlock(M,1);
dim3 dimGrid(N,L);
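	// Launch shape: threadIdx.x walks the M rows (j), blockIdx.x the N slices (k) and blockIdx.y the
	// L columns (i), i.e. one thread per voxel; this assumes M does not exceed the per-block thread limit.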
VoxelPosition *d_bound;
unsigned char* d_f;
Vector* d_force;
hipMalloc((void **)&d_f,sizeof(unsigned char)*L*M*N);
hipMalloc((void **)&d_bound,sizeof(VoxelPosition)*BOUND_SIZE);
hipMalloc((void **)&d_force,sizeof(Vector)*L*M*N);
hipMemcpy(d_f,f,sizeof(unsigned char)*L*M*N,hipMemcpyHostToDevice);
hipMemcpy(d_bound,Bound,sizeof(VoxelPosition)*BOUND_SIZE,hipMemcpyHostToDevice);
// hipMemcpyToSymbol(d_bound,Bound, numBound* sizeof(VoxelPosition));
hipMemcpy(d_force,force,sizeof(Vector)*L*M*N,hipMemcpyHostToDevice);
hipLaunchKernelGGL(( compute_potential_field), dim3(dimGrid),dim3(dimBlock), 0, 0, d_bound,d_force,numBound,d_f,inOut,slsz,sz,L, fieldStrenght);
hipLaunchKernelGGL(( normalize_vector), dim3(dimGrid),dim3(dimBlock), 0, 0, d_force,d_f,inOut,slsz,L);
delete [] Bound;
#ifdef _DEBUG
PrintElapsedTime("\tPF-3: computing potential field for inside voxels.");
#endif
#ifdef _DEBUG
PrintElapsedTime("\tPF-4: normalizing force vectors for inside voxels.");
#endif
if (!inOut) {
//neighbors:
dim3 dimBlock(Mm1-1,1);
dim3 dimGrid(Nm1-1,Lm1-1);
hipLaunchKernelGGL(( computePotentialFieldForBoundaryVoxels), dim3(dimGrid),dim3(dimBlock), 0, 0, d_f, d_force, slsz, inOut, L);
}
hipMemcpy(force,d_force,sizeof(Vector)*L*M*N,hipMemcpyDeviceToHost);
#ifdef _DEBUG
PrintElapsedTime("\tPF-5: computing potential field for boundary voxels.");
#endif
return true;
}
compareStruct comp;
bool SortBoundaryArray(int numBound, VoxelPosition Bound[]) {
thrust::sort(Bound, Bound+numBound, comp);
return true;
}
| b083fc96b046600d19ebab7d05a813eda8b30597.cu | // ----
// ---- Computes the potential field for a volume
// ---- Input: volume file, dimensions: X, Y, Z, output file name
// ---- Output: normalized potential field:
// 1 vector for each point in the volume
//
// Last change: Thu May 15 15:20:38 EDT 2003 by Nicu D. Cornea
//
//
// #define TRACE
#include "potVect.h"
#include <thrust/sort.h>
#define BOUND_SIZE 1200000
struct compareStruct {
__host__ __device__
bool operator()(VoxelPosition a, VoxelPosition b) {
if(a.z != b.z)
return a.z < b.z;
else if(a.y != b.y)
return a.y < b.y;
else
return a.x < b.x;
}
};
bool SortBoundaryArray(int numBound, VoxelPosition Bound[]);
/*
This kernel normalizes the force field: at each voxel it computes the Euclidean magnitude of the
force vector and divides each of its x, y, z components by that magnitude (when it is non-zero).
*/
__global__ void normalize_vector(Vector* force,unsigned char* f, bool inOut,int slsz,int L)
{
int k=blockIdx.x;
int j=threadIdx.x;
int i=blockIdx.y;
int idx=k*slsz + j*L + i;
if(!inOut) {
		// forces were computed only for interior voxels, so skip exterior ones
if(f[idx] == EXTERIOR) return;
}
float r = force[idx].xd*force[idx].xd +
force[idx].yd*force[idx].yd +
force[idx].zd*force[idx].zd;
if(r > 0.00) {
r = sqrtf(r);
force[idx].xd = force[idx].xd / r;
force[idx].yd = force[idx].yd / r;
force[idx].zd = force[idx].zd / r;
}
}
/*
This kernel computes the potential field at every interior voxel, using the
boundary voxels as point charges.
*/
__global__ void compute_potential_field(VoxelPosition *Bound,Vector* force,int numBound,unsigned char* f,bool inOut,int slsz,int sz,int L, int fieldStrenght)
{
int k=blockIdx.x;
int j=threadIdx.x;
int i=blockIdx.y;
int zStartIndex = 0;
int zEndIndex = numBound- 1;
int s;
for (s = 0; s < numBound; s++) {
if((k - Bound[s].z) <= PF_THRESHOLD) {
zStartIndex = s;
break;
}
}
for (s = numBound-1; s >= zStartIndex; s--) {
if((Bound[s].z - k) <= PF_THRESHOLD) {
zEndIndex = s;
break;
}
}
int yStartIndex = zStartIndex;
int yEndIndex = zEndIndex;
for (s = zStartIndex; s <= zEndIndex; s++) {
if((j - Bound[s].y) <= PF_THRESHOLD) {
yStartIndex = s;
break;
}
}
for (s = zEndIndex; s >= yStartIndex; s--) {
if((Bound[s].y - j) <= PF_THRESHOLD) {
yEndIndex = s;
break;
}
}
int idx=k*slsz + j*L + i;
force[idx].xd = 0.00;
force[idx].yd = 0.00;
force[idx].zd = 0.00;
if(!inOut) {
if(f[idx] == 0) {
return;
}
}
if(f[idx] == SURF) return;
if(f[idx] == BOUNDARY) return;
int startIndex = yStartIndex;
int endIndex = yEndIndex;
for (s = yStartIndex; s <= yEndIndex; s++) {
if((i - Bound[s].x) <= PF_THRESHOLD) {
startIndex = s;
break;
}
}
for (s = yEndIndex; s >= startIndex; s--) {
if((Bound[s].x - i) <= PF_THRESHOLD) {
endIndex = s;
break;
}
}
if(endIndex < startIndex) {
startIndex = 0;
endIndex = numBound - 1;
}
for (s = startIndex; s <= endIndex; s++) {
float v1 = i - Bound[s].x;
float v2 = j - Bound[s].y;
float v3 = k - Bound[s].z;
float r, t;
#ifdef EUCLIDEAN_METRIC
// euclidean metric
r = sqrtf(v1*v1 + v2*v2 + v3*v3);
#else
// simpler metric
r = abs(v1) + abs(v2) + abs(v3);
#endif
if(r != 0.00) {
t = 1.00;
for(int p = 0; p <= fieldStrenght; p++) {
t = t * r;
}
r = t;
force[idx].xd+=(v1/r);
force[idx].yd+=(v2/r);
force[idx].zd+=(v3/r);
}
}
}
/*
This kernel computes the potential field at every boundary voxel by averaging the
forces of its neighbours; the 26 neighbour offsets are kept in the shared ng array.
*/
__global__ void computePotentialFieldForBoundaryVoxels(unsigned char* f, Vector* force, int slsz, bool inOut, int L) {
int k=blockIdx.x+1;
int j=threadIdx.x+1;
int i=blockIdx.y+1;
__shared__ int ng[26];
if(threadIdx.x==0)
{
// face neighbors
ng[0] = + slsz + 0 + 0;
ng[1] = - slsz + 0 + 0;
ng[2] = + 0 + L + 0;
ng[3] = + 0 - L + 0;
ng[4] = + 0 + 0 + 1;
ng[5] = + 0 + 0 - 1;
// v-neighbors
ng[6] = - slsz - L - 1;
ng[7] = - slsz - L + 1;
ng[8] = - slsz + L - 1;
ng[9] = - slsz + L + 1;
ng[10] = + slsz - L - 1;
ng[11] = + slsz - L + 1;
ng[12] = + slsz + L - 1;
ng[13] = + slsz + L + 1;
// e-neighbors
ng[14] = + slsz + L + 0;
ng[15] = + slsz - L + 0;
ng[16] = - slsz + L + 0;
ng[17] = - slsz - L + 0;
ng[18] = + slsz + 0 + 1;
ng[19] = + slsz + 0 - 1;
ng[20] = - slsz + 0 + 1;
ng[21] = - slsz + 0 - 1;
ng[22] = + 0 + L + 1;
ng[23] = + 0 + L - 1;
ng[24] = + 0 - L + 1;
ng[25] = + 0 - L - 1;
}
__syncthreads();
long idx = k*slsz + j*L + i;
if((f[idx] == SURF) ||
(f[idx] == BOUNDARY))
{
force[idx].xd = 0.00;
force[idx].yd = 0.00;
force[idx].zd = 0.00;
float var_xd=0.00;
float var_yd=0.00;
float var_zd=0.00;
// look at the neighbors and average the forces if not 0
//
int v1 = 0;
for(int s=0; s < 26; s++) {
long iidx = idx + ng[s];
if(f[iidx] == SURF) continue;
if(f[iidx] == BOUNDARY) continue;
// if we know the interior of the object, take only interior
// neighbors
if(!inOut) {
if(f[iidx] == EXTERIOR) continue;
}
var_xd = var_xd + force[iidx].xd;
var_yd = var_yd + force[iidx].yd;
var_zd = var_zd + force[iidx].zd;
v1 = v1 + 1;
}
// average
if(v1 != 0) {
var_xd = var_xd / (double) v1;
var_yd= var_yd / (double) v1;
var_zd = var_zd / (double) v1;
}
else {
printf("Boundary voxel has no interior neighbor !!! - Force = 0\n");
}
// normalize
float r = var_xd*var_xd +
var_yd*var_yd +
var_zd*var_zd;
if(r > 0.00) {
r = sqrtf(r);
force[idx].xd = var_xd / r;
force[idx].yd = var_yd / r;
force[idx].zd = var_zd/ r;
}
}
}
bool CalculatePotentialField(
int L, int M, int N, // [in] size of volume
unsigned char* f, // [in] volume flags
	int fieldStrenght, // [in] potential field strength
Vector* force, // [out] force field
bool inOut // [in] flag indicating that we don't
// know what the inside/outside of
// the object is. We have only point
// samples of the boundary.
// DEFAULT: false (only interior)
) {
//cudaSetDevice(1);
int Lm1, Mm1, Nm1;
int i,j,k, s, p;
long idx, iidx, slsz, sz;
VoxelPosition* Bound;
int numBound = 0;
bool flagSurf, flagBound;
double r, t;
int v1, v2, v3;
int startIndex, tmpStartIndex, endIndex, tmpEndIndex, zStartIndex, zEndIndex, yStartIndex, yEndIndex;
//
// check volume padding - fast version
//
if(!CheckVolumePadding(f, L, M, N)) {
printf("** Error - Object touches bounding box. Abort.\n");
exit(1);
}
#ifdef _DEBUG
printf("\t************ Potential Field calculation parameters: ******************\n");
#ifdef HALF_BOUNDARY_POINTS
printf("\t** Using only HALF of the boundary points.\n");
#else
printf("\t** Using ALL boundary points.\n");
#endif
#ifdef EUCLIDEAN_METRIC
printf("\t** Using EUCLIDEAN metric.\n");
#else
printf("\t** Using NON EUCLIDEAN metric.\n");
#endif
if(inOut) {
printf("\t** Inside and Outside.\n");
}
else {
printf("\t** Inside ONLY.\n");
}
printf("\t********* Potential Field calculation parameters - end ****************\n");
#endif
if((Bound = new VoxelPosition[BOUND_SIZE]) == NULL) {
printf("\nERROR allocating memory for boundary array! - Abort\n");
exit(1);
}
Lm1 = L - 1;
Mm1 = M - 1;
Nm1 = N - 1;
slsz = L*M; // slice size
sz = slsz*N;
// save all the boundary voxels in array Bound[]
for (k = 1; k < Nm1; k++) {
for (j = 1; j < Mm1; j++) {
for (i = 1; i < Lm1; i++) {
flagSurf = false;
flagBound = true;
idx = k*slsz + j*L + i;
// CASE 1: treat the inner layer
if (f[idx] == 0) continue;
	//consider the six face neighbors; if any one of them is zero, this is a boundary voxel
iidx = k*slsz + j*L + i-1;
if (f[iidx] == 0) {
flagSurf = true;
}
#ifdef HALF_BOUNDARY_POINTS
// consider only half of the boundary points
else {
if (f[iidx] == BOUNDARY) {
// a neighbour of the point was already selected so we will not select this one as part of the boundary.
flagBound = false;
}
}
#endif
if(!flagSurf || flagBound) {
iidx = k*slsz + j*L + i+1;
if (f[iidx] == 0) {
flagSurf = true;
}
#ifdef HALF_BOUNDARY_POINTS
// consider only half of the boundary points
else {
if (f[iidx] == BOUNDARY) {
// a neighbour of the point was already selected so we will not select it as part of the boundary.
flagBound = false;
}
}
#endif
if(!flagSurf || flagBound) {
iidx = k*slsz + (j-1)*L + i;
if (f[iidx] == 0) {
flagSurf = true;
}
#ifdef HALF_BOUNDARY_POINTS
// consider only half of the boundary points
else {
if (f[iidx] == BOUNDARY) {
// a neighbour of the point was already selected so we will not select it as part of the boundary.
flagBound = false;
}
}
#endif
if(!flagSurf || flagBound) {
iidx = k*slsz + (j+1)*L + i;
if (f[iidx] == 0) {
flagSurf = true;
}
#ifdef HALF_BOUNDARY_POINTS
// consider only half of the boundary points
else {
if (f[iidx] == BOUNDARY) {
// a neighbour of the point was already selected so we will not select it as part of the boundary.
flagBound = false;
}
}
#endif
if(!flagSurf || flagBound) {
iidx = (k-1)*slsz + j*L + i;
if (f[iidx] == 0) {
flagSurf = true;
}
#ifdef HALF_BOUNDARY_POINTS
// consider only half of the boundary points
else {
if (f[iidx] == BOUNDARY) {
// a neighbour of the point was already selected so we will not select it as part of the boundary.
flagBound = false;
}
}
#endif
if(!flagSurf || flagBound) {
iidx = (k+1)*slsz + j*L + i;
if (f[iidx] == 0) {
flagSurf = true;
}
#ifdef HALF_BOUNDARY_POINTS
// consider only half of the boundary points
else {
if (f[iidx] == BOUNDARY) {
// a neighbour of the point was already selected so we will not select it as part of the boundary.
flagBound = false;
}
}
#endif
}
}
}
}
}
// restore idx to the right value
idx = k*slsz + j*L + i;
if (flagSurf) {
f[idx] = SURF;
if(flagBound) {
// if no neighbour of this voxel is already marked as boundary, then mark this one.
// or if we are taking all the boundary voxels
// (in this case flagBound stays true)
f[idx] = BOUNDARY;
Bound[numBound].x = i;
Bound[numBound].y = j;
Bound[numBound].z = k;
numBound++;
if(numBound >= BOUND_SIZE) {
printf("ERROR: too many boundary points detected !! - Abort.\n");
exit(1);
}
}
}
}
}
}
//printf("numBound = %d \n", numBound);
#ifdef _DEBUG
PrintElapsedTime("\tPF-1: finding the boundary voxels.");
printf("\t--Found %d boundary voxels.\n", numBound);
#endif
// sort the boundary array.
SortBoundaryArray(numBound, Bound);
#ifdef _DEBUG
PrintElapsedTime("\tPF-2: sorting the boundary voxels.");
#ifdef TRACE
// print the boundary voxels
for(i=0; i < numBound; i++) {
printf("%d %d %d 0.5\n", Bound[i].x, Bound[i].y, Bound[i].z);
}
exit(1);
#endif
#endif
// Compute the potential field
printf("Computing potential field.\n");
dim3 dimBlock(M,1);
dim3 dimGrid(N,L);
VoxelPosition *d_bound;
unsigned char* d_f;
Vector* d_force;
cudaMalloc((void **)&d_f,sizeof(unsigned char)*L*M*N);
cudaMalloc((void **)&d_bound,sizeof(VoxelPosition)*BOUND_SIZE);
cudaMalloc((void **)&d_force,sizeof(Vector)*L*M*N);
cudaMemcpy(d_f,f,sizeof(unsigned char)*L*M*N,cudaMemcpyHostToDevice);
cudaMemcpy(d_bound,Bound,sizeof(VoxelPosition)*BOUND_SIZE,cudaMemcpyHostToDevice);
// cudaMemcpyToSymbol(d_bound,Bound, numBound* sizeof(VoxelPosition));
cudaMemcpy(d_force,force,sizeof(Vector)*L*M*N,cudaMemcpyHostToDevice);
compute_potential_field<<<dimGrid,dimBlock>>>(d_bound,d_force,numBound,d_f,inOut,slsz,sz,L, fieldStrenght);
normalize_vector<<<dimGrid,dimBlock>>>(d_force,d_f,inOut,slsz,L);
delete [] Bound;
#ifdef _DEBUG
PrintElapsedTime("\tPF-3: computing potential field for inside voxels.");
#endif
#ifdef _DEBUG
PrintElapsedTime("\tPF-4: normalizing force vectors for inside voxels.");
#endif
if (!inOut) {
//neighbors:
dim3 dimBlock(Mm1-1,1);
dim3 dimGrid(Nm1-1,Lm1-1);
computePotentialFieldForBoundaryVoxels<<<dimGrid,dimBlock>>>(d_f, d_force, slsz, inOut, L);
}
cudaMemcpy(force,d_force,sizeof(Vector)*L*M*N,cudaMemcpyDeviceToHost);
#ifdef _DEBUG
PrintElapsedTime("\tPF-5: computing potential field for boundary voxels.");
#endif
return true;
}
compareStruct comp;
bool SortBoundaryArray(int numBound, VoxelPosition Bound[]) {
thrust::sort(Bound, Bound+numBound, comp);
return true;
}
|
5f40b106a3bcdbb02d6eb451fb9bdeb8960b8645.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* This is an automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,int var_2,int var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12) {
if (comp == +1.3641E-42f * var_4) {
for (int i=0; i < var_1; ++i) {
for (int i=0; i < var_2; ++i) {
comp += powf((+1.0759E36f + var_5 - -1.6288E-41f - (-1.2986E35f * (var_6 - +1.3532E-41f))), var_7 + -1.8067E36f);
for (int i=0; i < var_3; ++i) {
comp = var_8 - (var_9 / (+1.4957E-14f - var_10 / var_11 * var_12));
}
}
}
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
int tmp_3 = atoi(argv[3]);
int tmp_4 = atoi(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
hipLaunchKernelGGL(( compute), dim3(1),dim3(1), 0, 0, tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13);
hipDeviceSynchronize();
return 0;
}
| 5f40b106a3bcdbb02d6eb451fb9bdeb8960b8645.cu |
/* This is an automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,int var_2,int var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12) {
if (comp == +1.3641E-42f * var_4) {
for (int i=0; i < var_1; ++i) {
for (int i=0; i < var_2; ++i) {
comp += powf((+1.0759E36f + var_5 - -1.6288E-41f - (-1.2986E35f * (var_6 - +1.3532E-41f))), var_7 + -1.8067E36f);
for (int i=0; i < var_3; ++i) {
comp = var_8 - (var_9 / (+1.4957E-14f - var_10 / var_11 * var_12));
}
}
}
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
int tmp_3 = atoi(argv[3]);
int tmp_4 = atoi(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13);
cudaDeviceSynchronize();
return 0;
}
|
1facc8c6604d19208efd70bf1ea873bad3ae7321.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "CudaBuffer.h"
#include "FluidKernels.h"
#ifdef __HIPCC__
typedef texture< float, hipTextureType2D, hipReadModeElementType > fluidTexture;
typedef surface< void, cudaSurfaceType2D > fluidSurface;
fluidTexture frontTex;
fluidSurface backSurface;
fluidTexture veloXTex;
fluidSurface veloXSurface;
fluidTexture veloYTex;
fluidSurface veloYSurface;
fluidTexture tmp1Texture;
fluidSurface tmp1Surface;
fluidTexture tmp2Texture;
fluidSurface tmp2Surface;
fluidTexture divTexture;
fluidSurface divSurface;
extern "C" __global__ void
diffuse_Kernel(float diff, int width, int height, float dt)
{
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
float ustep = 1.0f / (float)width;
float vstep = 1.0f / (float)height;
float u = cuGetCoord(x, width);
float v = cuGetCoord(y, height);
float pC = tex2D(frontTex, u, v);
float pL = tex2D(frontTex, u-ustep, v);
float pR = tex2D(frontTex, u+ustep, v);
float pB = tex2D(frontTex, u, v-vstep);
float pT = tex2D(frontTex, u, v+vstep);
float a = dt * diff * width * height;
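	// One Jacobi-style relaxation step of the implicit diffusion update: the new value blends the
	// centre sample with its four neighbours, weighted by a = dt*diff*width*height.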
float result = (pC + a * (pL + pR + pB + pT))/(1.0f+4.0f*a);
surf2Dwrite<float>(result, backSurface, x*4, y);
//surf2Dwrite<float>(pC-0.01f, backSurface, x*4, y);
}
extern "C" __device__ float
cuGetCoord(int pos, int stride){
return ((pos+0.5f) / stride);
}
extern "C" __global__ void
project_Kernel(int width, int height, float H)
{
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
float u = cuGetCoord(x, width);
float v = cuGetCoord(y, height);
float ustep = 1.0f / (float)width;
float vstep = 1.0f / (float)height;
float rX = tex2D(veloXTex, u+ustep, v);
float lX = tex2D(veloXTex, u-ustep, v);
float rY = tex2D(veloYTex, u, v+vstep);
float lY = tex2D(veloYTex, u, v-vstep);
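	// Scaled central-difference divergence of the velocity, -0.5*H*(dvx + dvy); the two pressure
	// scratch surfaces are cleared so the iterative solve in project_Kernel2 starts from zero.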
float val = -0.5f * H *(rX - lX + rY - lY);
surf2Dwrite<float>(val, divSurface, x*4, y);
surf2Dwrite<float>(0.0f, tmp1Surface, x*4, y);
surf2Dwrite<float>(0.0f, tmp2Surface, x*4, y);
}
extern "C" __global__ void
project_Kernel2(int width, int height){
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
float u = cuGetCoord(x, width);
float v = cuGetCoord(y, height);
float ustep = 1.0f / (float)width;
float vstep = 1.0f / (float)height;
float pL = tex2D(tmp1Texture, u-ustep, v);
float pR = tex2D(tmp1Texture, u+ustep, v);
float pT = tex2D(tmp1Texture, u, v-vstep);
float pB = tex2D(tmp1Texture, u, v+vstep);
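	// One Jacobi iteration for the pressure: the average of the four neighbours plus the divergence term.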
float val = (tex2D(divTexture, u, v) + pL + pR + pT + pB )/4.0f;
surf2Dwrite<float>(val, tmp2Surface, x*4, y);
}
extern "C" __global__ void
project_Kernel3(int width, int height, float H){
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
float u = cuGetCoord(x, width);
float v = cuGetCoord(y, height);
float ustep = 1.0f / (float)width;
float vstep = 1.0f / (float)height;
float pL = tex2D(tmp1Texture, u-ustep, v);
float pR = tex2D(tmp1Texture, u+ustep, v);
float pT = tex2D(tmp1Texture, u, v-vstep);
float pB = tex2D(tmp1Texture, u, v+vstep);
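	// Subtract the pressure gradient (central differences of the solved pressure, divided by H)
	// from the velocity to project it toward a divergence-free field.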
float valX = tex2D(veloXTex, u, v) - (0.5f*(pR - pL)/H);
float valY = tex2D(veloYTex, u, v) - (0.5f*(pB - pT)/H);
surf2Dwrite<float>(valX, veloXSurface, x*4, y);
surf2Dwrite<float>(valY, veloYSurface, x*4, y);
}
extern "C" __global__ void
advect_Kernel(int width, int height, float dt)
{
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
float u = cuGetCoord(x, width);
float v = cuGetCoord(y, height);
float vx = (tex2D(veloXTex, u, v));
float vy = (tex2D(veloYTex, u, v));
vx /= width;
vy /= height;
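	// Semi-Lagrangian advection: trace back along the velocity (converted to normalized texture
	// coordinates) and write the sampled front-texture value into the back surface.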
surf2Dwrite<float>(tex2D(frontTex, u-vx, v-vy), backSurface, x*4, y);
}
extern "C" __global__ void
get_back_data_Kernel(float * g_data, int width){
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
g_data[y*width+x] = surf2Dread<float>(backSurface, x*4, y);
}
extern "C" __global__ void
get_front_data_Kernel(float * g_data, int width, int height){
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
float u = cuGetCoord(x, width);
float v = cuGetCoord(y, height);
g_data[y*width+x] = tex2D(frontTex, u, v);
}
extern "C" __global__ void
fill_vbo_i8(char *dst, int width, int height){
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
float u = cuGetCoord(x, width);
float v = cuGetCoord(y, height);
dst[y*width+x] = (char)(tex2D(frontTex, u, v)*255);
}
#endif
CudaBuffer::CudaBuffer(const size_t width, const size_t height, FieldType type):
m_Width(width),
m_Height(height),
m_isUpdating(false),
m_ReadPtr(0),
m_WritePtr(0),
m_dataFront(new float[width*height]),
m_dataBack(new float[width*height]),
m_dDataPtr(0),
m_readPtr(0),
m_writePtr(0),
m_dimBlock(dim3(8, 8, 1)),
m_dimGrid(dim3(width/8, height/8, 1)),
m_type(type)
{
size_t size = width*height;
m_dataSize = size*sizeof(float);
std::fill(m_dataFront, m_dataFront+(size), 0.0f);
std::fill(m_dataBack, m_dataBack+(size), 0.0f);
checkCudaErrors( hipMalloc( (void**) &m_dDataPtr, m_dataSize));
m_channelDescr = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat);
checkCudaErrors( hipMallocArray( &m_cuFrontPtr, &m_channelDescr, m_Width, m_Height, hipArraySurfaceLoadStore ));
checkCudaErrors( hipMallocArray( &m_cuBackPtr, &m_channelDescr, m_Width, m_Height, hipArraySurfaceLoadStore ));
checkCudaErrors( hipMemcpyToArray( m_cuFrontPtr, 0, 0, m_dataFront, m_dataSize, hipMemcpyHostToDevice));
checkCudaErrors( hipMemcpyToArray( m_cuBackPtr, 0, 0, m_dataBack, m_dataSize, hipMemcpyHostToDevice));
frontTex.addressMode[0] = hipAddressModeWrap;
frontTex.addressMode[1] = hipAddressModeWrap;
frontTex.filterMode = hipFilterModeLinear;
frontTex.normalized = true;
veloXTex.addressMode[0] = hipAddressModeWrap;
veloXTex.addressMode[1] = hipAddressModeWrap;
veloXTex.filterMode = hipFilterModeLinear;
veloXTex.normalized = true;
veloYTex.addressMode[0] = hipAddressModeWrap;
veloYTex.addressMode[1] = hipAddressModeWrap;
veloYTex.filterMode = hipFilterModeLinear;
veloYTex.normalized = true;
bind();
}
void CudaBuffer::bind(bool bindFront){
unbind();
if(bindFront){
checkCudaErrors( hipBindTextureToArray( frontTex, m_cuFrontPtr, m_channelDescr ));
checkCudaErrors( hipBindSurfaceToArray( backSurface, m_cuBackPtr ));
}
switch(m_type){
case VELO_X: {
checkCudaErrors( hipUnbindTexture( veloXTex) );
checkCudaErrors( hipBindTextureToArray( veloXTex, m_cuFrontPtr, m_channelDescr ));
checkCudaErrors( hipBindSurfaceToArray( veloXSurface, m_cuBackPtr ));
break;
}
case VELO_Y: {
checkCudaErrors( hipUnbindTexture( veloYTex) );
checkCudaErrors( hipBindTextureToArray( veloYTex, m_cuFrontPtr, m_channelDescr ));
checkCudaErrors( hipBindSurfaceToArray( veloYSurface, m_cuBackPtr ));
break;
}
}
}
void CudaBuffer::unbind(){
checkCudaErrors( hipUnbindTexture( frontTex) );
}
CudaBuffer::~CudaBuffer(){
delete []m_dataFront;
delete []m_dataBack;
checkCudaErrors( hipUnbindTexture( veloXTex) );
checkCudaErrors( hipUnbindTexture( veloYTex) );
checkCudaErrors( hipUnbindTexture( frontTex) );
hipFreeArray(m_cuFrontPtr);
hipFreeArray(m_cuBackPtr);
}
float* CudaBuffer::beginUpdate(BufferType readBuffer, BufferType writeBuffer){
if(! m_isUpdating){
m_isUpdating = true;
bind();
if(readBuffer == CudaBuffer::FRONT){
hipLaunchKernelGGL(( get_front_data_Kernel), dim3(m_dimGrid), dim3(m_dimBlock) , 0 , 0, m_dDataPtr, m_Width, m_Height);
checkCudaErrors( hipMemcpy( m_dataFront, m_dDataPtr, m_dataSize, hipMemcpyDeviceToHost) );
m_readPtr = m_dataFront;
}
if(readBuffer == CudaBuffer::BACK){
hipLaunchKernelGGL(( get_back_data_Kernel), dim3(m_dimGrid), dim3(m_dimBlock) , 0 , 0, m_dDataPtr, m_Width);
checkCudaErrors( hipMemcpy( m_dataBack, m_dDataPtr, m_dataSize, hipMemcpyDeviceToHost) );
m_readPtr = m_dataBack;
}
if(writeBuffer == CudaBuffer::FRONT){
hipLaunchKernelGGL(( get_front_data_Kernel), dim3(m_dimGrid), dim3(m_dimBlock) , 0 , 0, m_dDataPtr, m_Width, m_Height);
checkCudaErrors( hipMemcpy( m_dataFront, m_dDataPtr, m_dataSize, hipMemcpyDeviceToHost) );
m_writePtr = m_dataFront;
m_cuWritePtr = m_cuFrontPtr;
}
if(writeBuffer == CudaBuffer::BACK){
hipLaunchKernelGGL(( get_back_data_Kernel), dim3(m_dimGrid), dim3(m_dimBlock) , 0 , 0, m_dDataPtr, m_Width);
checkCudaErrors( hipMemcpy( m_dataBack, m_dDataPtr, m_dataSize, hipMemcpyDeviceToHost) );
m_writePtr = m_dataBack;
m_cuWritePtr = m_cuBackPtr;
}
if(writeBuffer == CudaBuffer::NONE){
m_writePtr = 0;
m_cuWritePtr = 0;
}
if(readBuffer == CudaBuffer::NONE) m_readPtr = 0;
return m_writePtr;
}else{
throw UpdatingException("In beginUpdate");
}
}
void CudaBuffer::flushUpdate(){
if( m_isUpdating){
if (m_cuWritePtr){
checkCudaErrors( hipMemcpyToArray( m_cuWritePtr, 0, 0, m_writePtr, m_dataSize, hipMemcpyHostToDevice));
}else{
throw UpdatingException("Can't flush to NULL");
}
}else{
throw UpdatingException("In flushUpdate");
}
}
void CudaBuffer::endUpdate(bool flush){
if( m_isUpdating){
if(flush){
flushUpdate();
}
m_cuWritePtr = 0;
m_writePtr = 0;
m_readPtr = 0;
m_isUpdating = false;
}else{
throw UpdatingException("In Flush");
}
}
void CudaBuffer::flip(){
if(! m_isUpdating){
checkCudaErrors( hipUnbindTexture( frontTex ));
std::swap(m_cuFrontPtr, m_cuBackPtr);
checkCudaErrors( hipBindTextureToArray( frontTex, m_cuFrontPtr, m_channelDescr ));
checkCudaErrors( hipBindSurfaceToArray( backSurface, m_cuBackPtr));
}else{
throw UpdatingException("In flip");
}
}
void CudaBuffer::setValue(const size_t x, const size_t y, const float value)
{
if(m_isUpdating){
setValue(x, y, value, 1.0f);
}else{
throw UpdatingException("In Set Value");
}
}
void CudaBuffer::setValue(const size_t x, const size_t y, const float value, const float weight)
{
if(m_isUpdating){
m_writePtr[getIndex(x, y)] = weight*value + (1 - weight)* m_readPtr[getIndex(x, y)] ;
}else{
throw UpdatingException("In SetValue");
}
}
const float& CudaBuffer::operator()(const size_t x, const size_t y) const
{
return m_readPtr[getIndex(x, y)];
}
const float& CudaBuffer::operator()(const int x, const int y) const
{
return m_readPtr[getIndex(x, y)];
}
float CudaBuffer::operator()(float x, float y) const
{
while(x<0)
x+=m_Width;
while(x>=m_Width)
x-=m_Width;
while(y<0)
y+=m_Height;
while(y>=m_Height)
y-=m_Height;
float nx = x-(int)x;
float ny = y-(int)y;
int iX = (int)x;
int iY = (int)y;
if(!m_readPtr){
#ifdef DEBUG_OUTPUT
std::cout << "No Read Ptr\n";
#endif
}
float xInter1 = m_readPtr[getIndex(iX, iY)]*(1-nx) + m_readPtr[getIndex(iX+1, iY)]*nx;
float xInter2 = m_readPtr[getIndex(iX, iY+1)]*(1-nx) + m_readPtr[getIndex(iX+1, iY+1)]*nx;
return xInter1*(1-ny)+xInter2*ny;
}
//GLOBAL FIELDS
extern "C" __global__ void
get_display_Kernel(float * g_data, int widthFactor, float pressureContrast,float veloContrast,int width, int height){
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
if(x<width) {
//Pressure display
float u = cuGetCoord(x-width, width);
float v = cuGetCoord(y, height);
float val = tex2D(frontTex, u, v) * pressureContrast;
int coord = (y*width*widthFactor+x)*3;
g_data[coord+2] = val;
g_data[coord+1] = val;
g_data[coord] = val;
}else{
//Speed display
float u = cuGetCoord(x-width, width);
float v = cuGetCoord(y, height);
int coord = (y*width*widthFactor+x)*3;
g_data[coord] = tex2D(veloXTex, u, v)*0.5f*veloContrast + 0.5f;
g_data[coord+1] = tex2D(veloYTex, u, v)*0.5f*veloContrast + 0.5f;
g_data[coord+2] = 0;
}
}
void TmpFields::flip(){
checkCudaErrors( hipUnbindTexture( tmp1Texture) );
checkCudaErrors( hipUnbindTexture( tmp2Texture) );
std::swap(tmp1SurfacePtr, tmp2SurfacePtr);
checkCudaErrors( hipBindTextureToArray( tmp1Texture, tmp1SurfacePtr, channelDescr ));
checkCudaErrors( hipBindSurfaceToArray( tmp1Surface, tmp1SurfacePtr ));
checkCudaErrors( hipBindTextureToArray( tmp2Texture, tmp2SurfacePtr, channelDescr ));
checkCudaErrors( hipBindSurfaceToArray( tmp2Surface, tmp2SurfacePtr ));
};
void TmpFields::bind(){
checkCudaErrors( hipBindTextureToArray( tmp1Texture, tmp1SurfacePtr, channelDescr ));
checkCudaErrors( hipBindSurfaceToArray( tmp1Surface, tmp1SurfacePtr ));
checkCudaErrors( hipBindTextureToArray( tmp2Texture, tmp2SurfacePtr, channelDescr ));
checkCudaErrors( hipBindSurfaceToArray( tmp2Surface, tmp2SurfacePtr ));
checkCudaErrors( hipBindTextureToArray( divTexture, divSurfacePtr, channelDescr ));
checkCudaErrors( hipBindSurfaceToArray( divSurface, divSurfacePtr ));
}
void TmpFields::init(int width, int height){
//Display memory
m_displayDataSize = width*2*height*sizeof(float)*3;
checkCudaErrors( hipMalloc( (void**) &m_displayPtr, m_displayDataSize));
//Tex'n'Surf
channelDescr = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat);
checkCudaErrors( hipMallocArray( &tmp1SurfacePtr, &channelDescr, width, height, hipArraySurfaceLoadStore ));
checkCudaErrors( hipMallocArray( &tmp2SurfacePtr, &channelDescr, width, height, hipArraySurfaceLoadStore ));
checkCudaErrors( hipMallocArray( &divSurfacePtr, &channelDescr, width, height, hipArraySurfaceLoadStore ));
tmp1Texture.addressMode[0] = hipAddressModeWrap;
tmp1Texture.addressMode[1] = hipAddressModeWrap;
tmp1Texture.filterMode = hipFilterModePoint;
tmp1Texture.normalized = true;
tmp2Texture.addressMode[0] = hipAddressModeWrap;
tmp2Texture.addressMode[1] = hipAddressModeWrap;
tmp2Texture.filterMode = hipFilterModePoint;
tmp2Texture.normalized = true;
divTexture.addressMode[0] = hipAddressModeWrap;
divTexture.addressMode[1] = hipAddressModeWrap;
divTexture.filterMode = hipFilterModePoint;
divTexture.normalized = true;
}
void TmpFields::free(){
checkCudaErrors( hipUnbindTexture( tmp1Texture) );
checkCudaErrors( hipUnbindTexture( tmp2Texture) );
checkCudaErrors( hipUnbindTexture( divTexture) );
hipFreeArray(tmp1SurfacePtr);
hipFreeArray(tmp2SurfacePtr);
hipFreeArray(divSurfacePtr);
}
| 1facc8c6604d19208efd70bf1ea873bad3ae7321.cu | #include "CudaBuffer.h"
#include "FluidKernels.h"
#ifdef __CUDACC__
typedef texture< float, cudaTextureType2D, cudaReadModeElementType > fluidTexture;
typedef surface< void, cudaSurfaceType2D > fluidSurface;
fluidTexture frontTex;
fluidSurface backSurface;
fluidTexture veloXTex;
fluidSurface veloXSurface;
fluidTexture veloYTex;
fluidSurface veloYSurface;
fluidTexture tmp1Texture;
fluidSurface tmp1Surface;
fluidTexture tmp2Texture;
fluidSurface tmp2Surface;
fluidTexture divTexture;
fluidSurface divSurface;
extern "C" __global__ void
diffuse_Kernel(float diff, int width, int height, float dt)
{
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
float ustep = 1.0f / (float)width;
float vstep = 1.0f / (float)height;
float u = cuGetCoord(x, width);
float v = cuGetCoord(y, height);
float pC = tex2D(frontTex, u, v);
float pL = tex2D(frontTex, u-ustep, v);
float pR = tex2D(frontTex, u+ustep, v);
float pB = tex2D(frontTex, u, v-vstep);
float pT = tex2D(frontTex, u, v+vstep);
float a = dt * diff * width * height;
float result = (pC + a * (pL + pR + pB + pT))/(1.0f+4.0f*a);
surf2Dwrite<float>(result, backSurface, x*4, y);
//surf2Dwrite<float>(pC-0.01f, backSurface, x*4, y);
}
extern "C" __device__ float
cuGetCoord(int pos, int stride){
return ((pos+0.5f) / stride);
}
extern "C" __global__ void
project_Kernel(int width, int height, float H)
{
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
float u = cuGetCoord(x, width);
float v = cuGetCoord(y, height);
float ustep = 1.0f / (float)width;
float vstep = 1.0f / (float)height;
float rX = tex2D(veloXTex, u+ustep, v);
float lX = tex2D(veloXTex, u-ustep, v);
float rY = tex2D(veloYTex, u, v+vstep);
float lY = tex2D(veloYTex, u, v-vstep);
float val = -0.5f * H *(rX - lX + rY - lY);
surf2Dwrite<float>(val, divSurface, x*4, y);
surf2Dwrite<float>(0.0f, tmp1Surface, x*4, y);
surf2Dwrite<float>(0.0f, tmp2Surface, x*4, y);
}
extern "C" __global__ void
project_Kernel2(int width, int height){
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
float u = cuGetCoord(x, width);
float v = cuGetCoord(y, height);
float ustep = 1.0f / (float)width;
float vstep = 1.0f / (float)height;
float pL = tex2D(tmp1Texture, u-ustep, v);
float pR = tex2D(tmp1Texture, u+ustep, v);
float pT = tex2D(tmp1Texture, u, v-vstep);
float pB = tex2D(tmp1Texture, u, v+vstep);
float val = (tex2D(divTexture, u, v) + pL + pR + pT + pB )/4.0f;
surf2Dwrite<float>(val, tmp2Surface, x*4, y);
}
extern "C" __global__ void
project_Kernel3(int width, int height, float H){
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
float u = cuGetCoord(x, width);
float v = cuGetCoord(y, height);
float ustep = 1.0f / (float)width;
float vstep = 1.0f / (float)height;
float pL = tex2D(tmp1Texture, u-ustep, v);
float pR = tex2D(tmp1Texture, u+ustep, v);
float pT = tex2D(tmp1Texture, u, v-vstep);
float pB = tex2D(tmp1Texture, u, v+vstep);
float valX = tex2D(veloXTex, u, v) - (0.5f*(pR - pL)/H);
float valY = tex2D(veloYTex, u, v) - (0.5f*(pB - pT)/H);
surf2Dwrite<float>(valX, veloXSurface, x*4, y);
surf2Dwrite<float>(valY, veloYSurface, x*4, y);
}
extern "C" __global__ void
advect_Kernel(int width, int height, float dt)
{
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
float u = cuGetCoord(x, width);
float v = cuGetCoord(y, height);
float vx = (tex2D(veloXTex, u, v));
float vy = (tex2D(veloYTex, u, v));
vx /= width;
vy /= height;
surf2Dwrite<float>(tex2D(frontTex, u-vx, v-vy), backSurface, x*4, y);
}
extern "C" __global__ void
get_back_data_Kernel(float * g_data, int width){
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
g_data[y*width+x] = surf2Dread<float>(backSurface, x*4, y);
}
extern "C" __global__ void
get_front_data_Kernel(float * g_data, int width, int height){
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
float u = cuGetCoord(x, width);
float v = cuGetCoord(y, height);
g_data[y*width+x] = tex2D(frontTex, u, v);
}
extern "C" __global__ void
fill_vbo_i8(char *dst, int width, int height){
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
float u = cuGetCoord(x, width);
float v = cuGetCoord(y, height);
dst[y*width+x] = (char)(tex2D(frontTex, u, v)*255);
}
#endif
CudaBuffer::CudaBuffer(const size_t width, const size_t height, FieldType type):
m_Width(width),
m_Height(height),
m_isUpdating(false),
m_ReadPtr(0),
m_WritePtr(0),
m_dataFront(new float[width*height]),
m_dataBack(new float[width*height]),
m_dDataPtr(0),
m_readPtr(0),
m_writePtr(0),
m_dimBlock(dim3(8, 8, 1)),
m_dimGrid(dim3(width/8, height/8, 1)),
m_type(type)
{
size_t size = width*height;
m_dataSize = size*sizeof(float);
std::fill(m_dataFront, m_dataFront+(size), 0.0f);
std::fill(m_dataBack, m_dataBack+(size), 0.0f);
checkCudaErrors( cudaMalloc( (void**) &m_dDataPtr, m_dataSize));
m_channelDescr = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);
checkCudaErrors( cudaMallocArray( &m_cuFrontPtr, &m_channelDescr, m_Width, m_Height, cudaArraySurfaceLoadStore ));
checkCudaErrors( cudaMallocArray( &m_cuBackPtr, &m_channelDescr, m_Width, m_Height, cudaArraySurfaceLoadStore ));
checkCudaErrors( cudaMemcpyToArray( m_cuFrontPtr, 0, 0, m_dataFront, m_dataSize, cudaMemcpyHostToDevice));
checkCudaErrors( cudaMemcpyToArray( m_cuBackPtr, 0, 0, m_dataBack, m_dataSize, cudaMemcpyHostToDevice));
frontTex.addressMode[0] = cudaAddressModeWrap;
frontTex.addressMode[1] = cudaAddressModeWrap;
frontTex.filterMode = cudaFilterModeLinear;
frontTex.normalized = true;
veloXTex.addressMode[0] = cudaAddressModeWrap;
veloXTex.addressMode[1] = cudaAddressModeWrap;
veloXTex.filterMode = cudaFilterModeLinear;
veloXTex.normalized = true;
veloYTex.addressMode[0] = cudaAddressModeWrap;
veloYTex.addressMode[1] = cudaAddressModeWrap;
veloYTex.filterMode = cudaFilterModeLinear;
veloYTex.normalized = true;
bind();
}
void CudaBuffer::bind(bool bindFront){
unbind();
if(bindFront){
checkCudaErrors( cudaBindTextureToArray( frontTex, m_cuFrontPtr, m_channelDescr ));
checkCudaErrors( cudaBindSurfaceToArray( backSurface, m_cuBackPtr ));
}
switch(m_type){
case VELO_X: {
checkCudaErrors( cudaUnbindTexture( veloXTex) );
checkCudaErrors( cudaBindTextureToArray( veloXTex, m_cuFrontPtr, m_channelDescr ));
checkCudaErrors( cudaBindSurfaceToArray( veloXSurface, m_cuBackPtr ));
break;
}
case VELO_Y: {
checkCudaErrors( cudaUnbindTexture( veloYTex) );
checkCudaErrors( cudaBindTextureToArray( veloYTex, m_cuFrontPtr, m_channelDescr ));
checkCudaErrors( cudaBindSurfaceToArray( veloYSurface, m_cuBackPtr ));
break;
}
}
}
void CudaBuffer::unbind(){
checkCudaErrors( cudaUnbindTexture( frontTex) );
}
CudaBuffer::~CudaBuffer(){
delete []m_dataFront;
delete []m_dataBack;
checkCudaErrors( cudaUnbindTexture( veloXTex) );
checkCudaErrors( cudaUnbindTexture( veloYTex) );
checkCudaErrors( cudaUnbindTexture( frontTex) );
cudaFreeArray(m_cuFrontPtr);
cudaFreeArray(m_cuBackPtr);
}
float* CudaBuffer::beginUpdate(BufferType readBuffer, BufferType writeBuffer){
if(! m_isUpdating){
m_isUpdating = true;
bind();
if(readBuffer == CudaBuffer::FRONT){
get_front_data_Kernel<<< m_dimGrid, m_dimBlock , 0 >>>(m_dDataPtr, m_Width, m_Height);
checkCudaErrors( cudaMemcpy( m_dataFront, m_dDataPtr, m_dataSize, cudaMemcpyDeviceToHost) );
m_readPtr = m_dataFront;
}
if(readBuffer == CudaBuffer::BACK){
get_back_data_Kernel<<< m_dimGrid, m_dimBlock , 0 >>>(m_dDataPtr, m_Width);
checkCudaErrors( cudaMemcpy( m_dataBack, m_dDataPtr, m_dataSize, cudaMemcpyDeviceToHost) );
m_readPtr = m_dataBack;
}
if(writeBuffer == CudaBuffer::FRONT){
get_front_data_Kernel<<< m_dimGrid, m_dimBlock , 0 >>>(m_dDataPtr, m_Width, m_Height);
checkCudaErrors( cudaMemcpy( m_dataFront, m_dDataPtr, m_dataSize, cudaMemcpyDeviceToHost) );
m_writePtr = m_dataFront;
m_cuWritePtr = m_cuFrontPtr;
}
if(writeBuffer == CudaBuffer::BACK){
get_back_data_Kernel<<< m_dimGrid, m_dimBlock , 0 >>>(m_dDataPtr, m_Width);
checkCudaErrors( cudaMemcpy( m_dataBack, m_dDataPtr, m_dataSize, cudaMemcpyDeviceToHost) );
m_writePtr = m_dataBack;
m_cuWritePtr = m_cuBackPtr;
}
if(writeBuffer == CudaBuffer::NONE){
m_writePtr = 0;
m_cuWritePtr = 0;
}
if(readBuffer == CudaBuffer::NONE) m_readPtr = 0;
return m_writePtr;
}else{
throw UpdatingException("In beginUpdate");
}
}
void CudaBuffer::flushUpdate(){
if( m_isUpdating){
if (m_cuWritePtr){
checkCudaErrors( cudaMemcpyToArray( m_cuWritePtr, 0, 0, m_writePtr, m_dataSize, cudaMemcpyHostToDevice));
}else{
throw UpdatingException("Can't flush to NULL");
}
}else{
throw UpdatingException("In flushUpdate");
}
}
void CudaBuffer::endUpdate(bool flush){
if( m_isUpdating){
if(flush){
flushUpdate();
}
m_cuWritePtr = 0;
m_writePtr = 0;
m_readPtr = 0;
m_isUpdating = false;
}else{
throw UpdatingException("In Flush");
}
}
void CudaBuffer::flip(){
if(! m_isUpdating){
checkCudaErrors( cudaUnbindTexture( frontTex ));
std::swap(m_cuFrontPtr, m_cuBackPtr);
checkCudaErrors( cudaBindTextureToArray( frontTex, m_cuFrontPtr, m_channelDescr ));
checkCudaErrors( cudaBindSurfaceToArray( backSurface, m_cuBackPtr));
}else{
throw UpdatingException("In flip");
}
}
void CudaBuffer::setValue(const size_t x, const size_t y, const float value)
{
if(m_isUpdating){
setValue(x, y, value, 1.0f);
}else{
throw UpdatingException("In Set Value");
}
}
void CudaBuffer::setValue(const size_t x, const size_t y, const float value, const float weight)
{
if(m_isUpdating){
m_writePtr[getIndex(x, y)] = weight*value + (1 - weight)* m_readPtr[getIndex(x, y)] ;
}else{
throw UpdatingException("In SetValue");
}
}
const float& CudaBuffer::operator()(const size_t x, const size_t y) const
{
return m_readPtr[getIndex(x, y)];
}
const float& CudaBuffer::operator()(const int x, const int y) const
{
return m_readPtr[getIndex(x, y)];
}
float CudaBuffer::operator()(float x, float y) const
{
while(x<0)
x+=m_Width;
while(x>=m_Width)
x-=m_Width;
while(y<0)
y+=m_Height;
while(y>=m_Height)
y-=m_Height;
float nx = x-(int)x;
float ny = y-(int)y;
int iX = (int)x;
int iY = (int)y;
if(!m_readPtr){
#ifdef DEBUG_OUTPUT
std::cout << "No Read Ptr\n";
#endif
}
float xInter1 = m_readPtr[getIndex(iX, iY)]*(1-nx) + m_readPtr[getIndex(iX+1, iY)]*nx;
float xInter2 = m_readPtr[getIndex(iX, iY+1)]*(1-nx) + m_readPtr[getIndex(iX+1, iY+1)]*nx;
return xInter1*(1-ny)+xInter2*ny;
}
//GLOBAL FIELDS
extern "C" __global__ void
get_display_Kernel(float * g_data, int widthFactor, float pressureContrast,float veloContrast,int width, int height){
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
if(x<width) {
//Pressure display
float u = cuGetCoord(x-width, width);
float v = cuGetCoord(y, height);
float val = tex2D(frontTex, u, v) * pressureContrast;
int coord = (y*width*widthFactor+x)*3;
g_data[coord+2] = val;
g_data[coord+1] = val;
g_data[coord] = val;
}else{
//Speed display
float u = cuGetCoord(x-width, width);
float v = cuGetCoord(y, height);
int coord = (y*width*widthFactor+x)*3;
g_data[coord] = tex2D(veloXTex, u, v)*0.5f*veloContrast + 0.5f;
g_data[coord+1] = tex2D(veloYTex, u, v)*0.5f*veloContrast + 0.5f;
g_data[coord+2] = 0;
}
}
void TmpFields::flip(){
checkCudaErrors( cudaUnbindTexture( tmp1Texture) );
checkCudaErrors( cudaUnbindTexture( tmp2Texture) );
std::swap(tmp1SurfacePtr, tmp2SurfacePtr);
checkCudaErrors( cudaBindTextureToArray( tmp1Texture, tmp1SurfacePtr, channelDescr ));
checkCudaErrors( cudaBindSurfaceToArray( tmp1Surface, tmp1SurfacePtr ));
checkCudaErrors( cudaBindTextureToArray( tmp2Texture, tmp2SurfacePtr, channelDescr ));
checkCudaErrors( cudaBindSurfaceToArray( tmp2Surface, tmp2SurfacePtr ));
};
void TmpFields::bind(){
checkCudaErrors( cudaBindTextureToArray( tmp1Texture, tmp1SurfacePtr, channelDescr ));
checkCudaErrors( cudaBindSurfaceToArray( tmp1Surface, tmp1SurfacePtr ));
checkCudaErrors( cudaBindTextureToArray( tmp2Texture, tmp2SurfacePtr, channelDescr ));
checkCudaErrors( cudaBindSurfaceToArray( tmp2Surface, tmp2SurfacePtr ));
checkCudaErrors( cudaBindTextureToArray( divTexture, divSurfacePtr, channelDescr ));
checkCudaErrors( cudaBindSurfaceToArray( divSurface, divSurfacePtr ));
}
void TmpFields::init(int width, int height){
//Display memory
m_displayDataSize = width*2*height*sizeof(float)*3;
checkCudaErrors( cudaMalloc( (void**) &m_displayPtr, m_displayDataSize));
//Tex'n'Surf
channelDescr = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);
checkCudaErrors( cudaMallocArray( &tmp1SurfacePtr, &channelDescr, width, height, cudaArraySurfaceLoadStore ));
checkCudaErrors( cudaMallocArray( &tmp2SurfacePtr, &channelDescr, width, height, cudaArraySurfaceLoadStore ));
checkCudaErrors( cudaMallocArray( &divSurfacePtr, &channelDescr, width, height, cudaArraySurfaceLoadStore ));
tmp1Texture.addressMode[0] = cudaAddressModeWrap;
tmp1Texture.addressMode[1] = cudaAddressModeWrap;
tmp1Texture.filterMode = cudaFilterModePoint;
tmp1Texture.normalized = true;
tmp2Texture.addressMode[0] = cudaAddressModeWrap;
tmp2Texture.addressMode[1] = cudaAddressModeWrap;
tmp2Texture.filterMode = cudaFilterModePoint;
tmp2Texture.normalized = true;
divTexture.addressMode[0] = cudaAddressModeWrap;
divTexture.addressMode[1] = cudaAddressModeWrap;
divTexture.filterMode = cudaFilterModePoint;
divTexture.normalized = true;
}
void TmpFields::free(){
checkCudaErrors( cudaUnbindTexture( tmp1Texture) );
checkCudaErrors( cudaUnbindTexture( tmp2Texture) );
checkCudaErrors( cudaUnbindTexture( divTexture) );
cudaFreeArray(tmp1SurfacePtr);
cudaFreeArray(tmp2SurfacePtr);
cudaFreeArray(divSurfacePtr);
}
|
1c7ca3e6b327e7eac8127f8778adfe46a64b93b2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright 2019 Stanford
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "taso/ops.h"
#include "taso/cuda_helper.h"
using namespace taso;
void Unsqueeze::map(void)
{
// allocate tensors
size_t outputSize = sizeof(DATATYPE);
for (int i = 0; i < outputs[0].numDim; i++)
outputSize *= outputs[0].dim[i];
checkCUDA(hipMalloc(&outputs[0].data_ptr, outputSize));
}
void Unsqueeze::unmap(void)
{
checkCUDA(hipFree(outputs[0].data_ptr));
}
void Unsqueeze::forward(bool block)
{
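  // Unsqueeze only changes the logical shape; the element layout is unchanged, so the forward pass
  // is a flat element-wise copy from the input buffer to the output buffer.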
hipLaunchKernelGGL(( copy_kernel), dim3(GET_BLOCKS(outputs[0].volume())), dim3(CUDA_NUM_THREADS), 0, 0,
(float*)outputs[0].data_ptr, (float*)inputs[0].data_ptr, outputs[0].volume());
if (block)
checkCUDA(hipDeviceSynchronize());
}
void Model::measure_unsqueeze_cost(Unsqueeze* unsqz)
{
checkCUDA(hipDeviceSynchronize());
checkCUDA(hipEventRecord(startEvent));
for (int i = 0; i < REPEAT_TIMES; i++) {
hipLaunchKernelGGL(( copy_kernel), dim3(GET_BLOCKS(unsqz->outputs[0].volume())), dim3(CUDA_NUM_THREADS), 0, 0,
outputPtr, inputPtr, unsqz->outputs[0].volume());
}
checkCUDA(hipEventRecord(endEvent));
checkCUDA(hipEventSynchronize(endEvent));
float milliseconds;
hipEventElapsedTime(&milliseconds, startEvent, endEvent);
unsqz->runtime = milliseconds / REPEAT_TIMES;
if (print_cost)
printf(" measure[Squeeeze]: cost(%.4lf)\n", unsqz->runtime);
}
| 1c7ca3e6b327e7eac8127f8778adfe46a64b93b2.cu | /* Copyright 2019 Stanford
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "taso/ops.h"
#include "taso/cuda_helper.h"
using namespace taso;
void Unsqueeze::map(void)
{
// allocate tensors
size_t outputSize = sizeof(DATATYPE);
for (int i = 0; i < outputs[0].numDim; i++)
outputSize *= outputs[0].dim[i];
checkCUDA(cudaMalloc(&outputs[0].data_ptr, outputSize));
}
void Unsqueeze::unmap(void)
{
checkCUDA(cudaFree(outputs[0].data_ptr));
}
void Unsqueeze::forward(bool block)
{
copy_kernel<<<GET_BLOCKS(outputs[0].volume()), CUDA_NUM_THREADS>>>(
(float*)outputs[0].data_ptr, (float*)inputs[0].data_ptr, outputs[0].volume());
if (block)
checkCUDA(cudaDeviceSynchronize());
}
void Model::measure_unsqueeze_cost(Unsqueeze* unsqz)
{
checkCUDA(cudaDeviceSynchronize());
checkCUDA(cudaEventRecord(startEvent));
for (int i = 0; i < REPEAT_TIMES; i++) {
copy_kernel<<<GET_BLOCKS(unsqz->outputs[0].volume()), CUDA_NUM_THREADS>>>(
outputPtr, inputPtr, unsqz->outputs[0].volume());
}
checkCUDA(cudaEventRecord(endEvent));
checkCUDA(cudaEventSynchronize(endEvent));
float milliseconds;
cudaEventElapsedTime(&milliseconds, startEvent, endEvent);
unsqz->runtime = milliseconds / REPEAT_TIMES;
if (print_cost)
printf(" measure[Squeeeze]: cost(%.4lf)\n", unsqz->runtime);
}
|
240e362ac0de462ee60a180f92c4c61ef943fb1a.hip | // !!! This is a file automatically generated by hipify!!!
/*
Author: Ashutosh Sanan
Date: 05/05/2016
The main CUDA kernel to compute the gradient and HOG descriptor
Contains two distinct kernels for each
*/
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "hip/hip_runtime_api.h"
#include <math.h>
#include <unistd.h>
#define PI 3.14159265
#define TILE_WIDTH 16
#define Mask_width 3
#define Mask_radius Mask_width/2
#define w (TILE_WIDTH + Mask_width - 1)
extern "C" {
#include "global.h"
}
// CUDA kernel to calculate the gradient
__global__ void gradient_kernel(float* src_ptr, float* dst_ptr, float* theta_ptr, int rows, int cols)
{
__shared__ float image[w][w]; // Using shared memory for faster memory access
int width = cols;
int height = rows;
int tx = threadIdx.x;
int ty = threadIdx.y; //Thread ID's
int colIdx = blockDim.x * blockIdx.x + threadIdx.x;
int rowIdx = blockDim.y * blockIdx.y + threadIdx.y;
int dest = threadIdx.y * TILE_WIDTH + threadIdx.x;
int destY = dest / w;
int destX = dest % w;
int srcY = blockIdx.y * TILE_WIDTH + destY - Mask_radius;
int srcX = blockIdx.x * TILE_WIDTH + destX - Mask_radius;
int src = (srcY * width + srcX);
if(srcY>=0 && srcY<height && srcX>=0 && srcX <width)
image[destY][destX] = src_ptr[src];
else
image[destY][destX] = 0;
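	// The shared tile is w x w = (TILE_WIDTH+2)^2 cells (16x16 block plus a
	// one-pixel halo), which is more cells than the 256 threads in the block,
	// so a second batch of loads below fills the remaining cells.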
dest = threadIdx.y * TILE_WIDTH + threadIdx.x + TILE_WIDTH*TILE_WIDTH;
destY = dest / w;
destX = dest % w;
srcY = blockIdx.y * TILE_WIDTH + destY - Mask_radius;
srcX = blockIdx.x * TILE_WIDTH + destX - Mask_radius;
src = srcY * width + srcX;
if(destY < w)
{
if(srcY >= 0 && srcY < height && srcX >=0 && srcX < width)
image[destY][destX] = src_ptr[src];
else
image[destY][destX] = 0;
}
__syncthreads(); // Wait for all threads (synchronization)
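	// Central differences on the shared tile give the horizontal (x1) and
	// vertical (x2) gradients; the magnitude is sqrt(x1^2 + x2^2) and the
	// atan2 orientation is shifted into the [0, 360) degree range.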
float x1,x2,theta_local;
int x = tx + 1;
int y = ty + 1;
x1 = image[y][x+1] - image[y][x-1];
x2 = image[y+1][x] - image[y-1][x];
dst_ptr[rowIdx*cols + colIdx] = sqrt(x1*x1 + x2*x2);
theta_local = atan2(x2,x1)*180/PI;
if(theta_local < 0)
theta_local = theta_local + 360;
theta_ptr[rowIdx*cols + colIdx] = theta_local;
}
// CUDA kernel to calculate the HOG features
__global__ void d_compute_desc_kernel(float* mag_d, float* theta_d, float* blocks_desc_d, int rows, int cols)
{
volatile __shared__ float s_block[32][4][NBINS];
// volatile __shared__ float s_squares[4];
const int cellIdx = threadIdx.x; //cell in a block: 0-3
const int columnIdx = threadIdx.y; //which column for the particular thread 0-7
const int sIdx = threadIdx.y*blockDim.x + threadIdx.x; //The actual threadid out of 32
// position of the upper-most pixel in the column for this thread
const int blockX = (cellIdx % 2)*HOG_CELL_SIZE + columnIdx;
const int blockY = cellIdx < 2 ? 0 : HOG_CELL_SIZE;
const int pixelX = blockIdx.x * (HOG_BLOCK_WIDTH/2) + blockX; // we assume 50% overlap
const int pixelY = blockIdx.y * (HOG_BLOCK_HEIGHT/2) + blockY;
// initialize all bins for this thread
for(int i=0; i < NBINS; i++)
{
for(int cell =0; cell < HOG_BLOCK_CELLS_X*HOG_BLOCK_CELLS_Y; cell++)
s_block[sIdx][cell][i] = 0.f;
}
__syncthreads();
//<---------------------------------------------------------------------------------------------------------------------------------------->
if(pixelX < cols && pixelY < rows)
{
for(int i=0; i<HOG_CELL_SIZE; i++)
{
const int pixelIdx = (pixelY + i)*cols + pixelX;
float contribution = mag_d[pixelIdx];
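			// Soft binning: the magnitude is split between the two nearest
			// orientation bins in proportion to the angular offset (delta),
			// which avoids hard jumps at bin boundaries.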
float binSize = 360.f/NBINS;
			float orientation = theta_d[pixelIdx] - binSize/2.f;
if(orientation < 0)
orientation += 360.f;
float delta = (orientation * NBINS)/360.f;
int leftBin = (int)floorf(delta);
delta -= leftBin;
int rightBin = leftBin >= (NBINS-1) ? 0 : leftBin + 1;
if(leftBin < 0)
leftBin = NBINS-1;
float rightContribution = contribution * delta;
float leftContribution = contribution * (1-delta);
s_block[sIdx][0][leftBin] += leftContribution;
s_block[sIdx][0][rightBin]+= rightContribution;
s_block[sIdx][1][leftBin] += leftContribution;
s_block[sIdx][1][rightBin]+= rightContribution;
s_block[sIdx][2][leftBin] += leftContribution;
s_block[sIdx][2][rightBin]+= rightContribution;
s_block[sIdx][3][leftBin] += leftContribution;
s_block[sIdx][3][rightBin]+= rightContribution;
}
}
__syncthreads();
//<------------------------------------------------------------------------------------------------------------------------>
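	// Reduce the 32 per-thread partial histograms into s_block[0]; only the
	// threadIdx.y == 0 threads (one per cell) perform the accumulation.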
	if(threadIdx.y == 0)
{
for(int i=1; i<32; i++)
{
for(int bin=0; bin<NBINS; bin++)
{
s_block[0][threadIdx.x][bin] += s_block[i][threadIdx.x][bin];
}
}
}
__syncthreads();
// Normalizing in a different way in the main function so not using L2+Hys normalization
/*
// normalize the block histogram - L2+Hys normalization
const float epsilon = 0.036f * 0.036f; // magic numbers
const float eHys = 0.1f * 0.1f;
const float clipThreshold = 0.2f;
if(threadIdx.y == 0 )
{
float ls = 0.f;
for(int j=0; j < NBINS; j++)
{
ls += s_block[0][threadIdx.x][j] * s_block[0][threadIdx.x][j];
}
s_squares[threadIdx.x] = ls;
}
__syncthreads();
if(threadIdx.y == 0 && threadIdx.x == 0 )
{
s_squares[0] += s_squares[1] + s_squares[2] + s_squares[3];
}
__syncthreads();
	// we use rsqrtf (reciprocal sqrtf) because of CUDA peculiarities
float normalization = rsqrtf(s_squares[0]+epsilon);
// normalize and clip
if(threadIdx.y == 0 )
{
for(int j=0; j < NBINS; j++)
{
s_block[0][threadIdx.x][j] *= normalization;
s_block[0][threadIdx.x][j] = s_block[0][threadIdx.x][j] > clipThreshold ? clipThreshold : s_block[0][threadIdx.x][j];
}
}
// renormalize
if(threadIdx.y == 0 )
{
float ls = 0.f;
for(int j=0; j < NBINS; j++)
{
ls += s_block[0][threadIdx.x][j] * s_block[0][threadIdx.x][j];
}
s_squares[threadIdx.x] = ls;
}
__syncthreads();
if(threadIdx.y == 0 && threadIdx.x == 0 )
{
s_squares[0] += s_squares[1] + s_squares[2] + s_squares[3];
}
normalization = rsqrtf(s_squares[0]+eHys);
if(threadIdx.y == 0 )
{
for(int j=0; j < NBINS; j++)
{
s_block[0][threadIdx.x][j] *= normalization;
}
}
*/
// Save the calculated descriptor
if(threadIdx.y == 0 )
{
const int writeIdx = NBINS*4 * (blockIdx.y * gridDim.x + blockIdx.x);
for(int bin=0; bin < NBINS; bin++)
{
//printf("In saving part\n");
blocks_desc_d[writeIdx + threadIdx.x*NBINS + bin] = s_block[0][threadIdx.x][bin];
if(writeIdx + threadIdx.x*NBINS + bin == 0)
printf("Value: %f\n",blocks_desc_d[writeIdx + threadIdx.x*NBINS + bin]);
}
}
}
// This function is called through C++, therefore using extern notation
extern "C" void gradient_kernel_caller(float* img_h, float* mag_h, float* hog_descriptor, int rows, int cols)
{
float* img_d, *mag_d, *theta_d;
// Allocate GPU memory
hipMalloc((void **) &img_d, rows*cols*sizeof(float));
hipMalloc((void **) &mag_d, rows*cols*sizeof(float));
hipMalloc((void **) &theta_d, rows*cols*sizeof(float));
//int i;
//Copy the image to GPU (device)
hipMemcpy(img_d, img_h, rows*cols*sizeof(float), hipMemcpyHostToDevice);
const int bl_size_x = 16;
const int bl_size_y = 16;
// Threads and grid size
dim3 threads(bl_size_x, bl_size_y);
dim3 grid((int)ceil(rows/(float)bl_size_x), (int)ceil(cols/(float)bl_size_y));
// Calling the kernel to calculate Gradient
hipLaunchKernelGGL(( gradient_kernel), dim3(grid),dim3(threads), 0, 0, img_d, mag_d, theta_d, rows, cols);
hipMemcpy(mag_h, mag_d, rows*cols*sizeof(float), hipMemcpyDeviceToHost);
// hipMemcpy(theta_h, theta_d, rows*cols*sizeof(float), hipMemcpyDeviceToHost);
float* blocks_desc_d;
const int nBlocks = ((rows/8)-1)*((cols/8)-1);
const int total_blocks_size = nBlocks * HOG_BLOCK_CELLS_X * HOG_BLOCK_CELLS_Y * NBINS * sizeof(float);
hipMalloc((void**)&blocks_desc_d, total_blocks_size);
dim3 dimGrid;
dimGrid.x = (int)floor(cols/8.f)-1;
dimGrid.y = (int)floor(rows/8.f)-1;
dim3 dimBlock(4,8);
printf("Grid: %d\t%d\n", dimBlock.x, dimBlock.y);
// Calling the kernel to calculate HOG descriptors
hipLaunchKernelGGL(( d_compute_desc_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, mag_d, theta_d, blocks_desc_d, rows, cols);
hipMemcpy(hog_descriptor, blocks_desc_d, total_blocks_size, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
hipPeekAtLastError();
}
| 240e362ac0de462ee60a180f92c4c61ef943fb1a.cu | /*
Author: Ashutosh Sanan
Date: 05/05/2016
The main CUDA kernel to compute the gradient and HOG descriptor
Contains two distinct kernels for each
*/
#include "cuda.h"
#include <stdio.h>
#include "cuda_runtime_api.h"
#include <math.h>
#include <unistd.h>
#define PI 3.14159265
#define TILE_WIDTH 16
#define Mask_width 3
#define Mask_radius Mask_width/2
#define w (TILE_WIDTH + Mask_width - 1)
extern "C" {
#include "global.h"
}
// CUDA kernel to calculate the gradient
__global__ void gradient_kernel(float* src_ptr, float* dst_ptr, float* theta_ptr, int rows, int cols)
{
__shared__ float image[w][w]; // Using shared memory for faster memory access
int width = cols;
int height = rows;
int tx = threadIdx.x;
int ty = threadIdx.y; //Thread ID's
int colIdx = blockDim.x * blockIdx.x + threadIdx.x;
int rowIdx = blockDim.y * blockIdx.y + threadIdx.y;
int dest = threadIdx.y * TILE_WIDTH + threadIdx.x;
int destY = dest / w;
int destX = dest % w;
int srcY = blockIdx.y * TILE_WIDTH + destY - Mask_radius;
int srcX = blockIdx.x * TILE_WIDTH + destX - Mask_radius;
int src = (srcY * width + srcX);
if(srcY>=0 && srcY<height && srcX>=0 && srcX <width)
image[destY][destX] = src_ptr[src];
else
image[destY][destX] = 0;
dest = threadIdx.y * TILE_WIDTH + threadIdx.x + TILE_WIDTH*TILE_WIDTH;
destY = dest / w;
destX = dest % w;
srcY = blockIdx.y * TILE_WIDTH + destY - Mask_radius;
srcX = blockIdx.x * TILE_WIDTH + destX - Mask_radius;
src = srcY * width + srcX;
if(destY < w)
{
if(srcY >= 0 && srcY < height && srcX >=0 && srcX < width)
image[destY][destX] = src_ptr[src];
else
image[destY][destX] = 0;
}
__syncthreads(); // Wait for all threads (synchronization)
float x1,x2,theta_local;
int x = tx + 1;
int y = ty + 1;
x1 = image[y][x+1] - image[y][x-1];
x2 = image[y+1][x] - image[y-1][x];
dst_ptr[rowIdx*cols + colIdx] = sqrt(x1*x1 + x2*x2);
theta_local = atan2(x2,x1)*180/PI;
if(theta_local < 0)
theta_local = theta_local + 360;
theta_ptr[rowIdx*cols + colIdx] = theta_local;
}
// CUDA kernel to calculate the HOG features
__global__ void d_compute_desc_kernel(float* mag_d, float* theta_d, float* blocks_desc_d, int rows, int cols)
{
volatile __shared__ float s_block[32][4][NBINS];
// volatile __shared__ float s_squares[4];
const int cellIdx = threadIdx.x; //cell in a block: 0-3
const int columnIdx = threadIdx.y; //which column for the particular thread 0-7
const int sIdx = threadIdx.y*blockDim.x + threadIdx.x; //The actual threadid out of 32
// position of the upper-most pixel in the column for this thread
const int blockX = (cellIdx % 2)*HOG_CELL_SIZE + columnIdx;
const int blockY = cellIdx < 2 ? 0 : HOG_CELL_SIZE;
const int pixelX = blockIdx.x * (HOG_BLOCK_WIDTH/2) + blockX; // we assume 50% overlap
const int pixelY = blockIdx.y * (HOG_BLOCK_HEIGHT/2) + blockY;
// initialize all bins for this thread
for(int i=0; i < NBINS; i++)
{
for(int cell =0; cell < HOG_BLOCK_CELLS_X*HOG_BLOCK_CELLS_Y; cell++)
s_block[sIdx][cell][i] = 0.f;
}
__syncthreads();
//<---------------------------------------------------------------------------------------------------------------------------------------->
if(pixelX < cols && pixelY < rows)
{
for(int i=0; i<HOG_CELL_SIZE; i++)
{
const int pixelIdx = (pixelY + i)*cols + pixelX;
float contribution = mag_d[pixelIdx];
float binSize = 360.f/NBINS;
			float orientation = theta_d[pixelIdx] - binSize/2.f;
if(orientation < 0)
orientation += 360.f;
float delta = (orientation * NBINS)/360.f;
int leftBin = (int)floorf(delta);
delta -= leftBin;
int rightBin = leftBin >= (NBINS-1) ? 0 : leftBin + 1;
if(leftBin < 0)
leftBin = NBINS-1;
float rightContribution = contribution * delta;
float leftContribution = contribution * (1-delta);
s_block[sIdx][0][leftBin] += leftContribution;
s_block[sIdx][0][rightBin]+= rightContribution;
s_block[sIdx][1][leftBin] += leftContribution;
s_block[sIdx][1][rightBin]+= rightContribution;
s_block[sIdx][2][leftBin] += leftContribution;
s_block[sIdx][2][rightBin]+= rightContribution;
s_block[sIdx][3][leftBin] += leftContribution;
s_block[sIdx][3][rightBin]+= rightContribution;
}
}
__syncthreads();
//<------------------------------------------------------------------------------------------------------------------------>
	if(threadIdx.y == 0)
{
for(int i=1; i<32; i++)
{
for(int bin=0; bin<NBINS; bin++)
{
s_block[0][threadIdx.x][bin] += s_block[i][threadIdx.x][bin];
}
}
}
__syncthreads();
// Normalizing in a different way in the main function so not using L2+Hys normalization
/*
// normalize the block histogram - L2+Hys normalization
const float epsilon = 0.036f * 0.036f; // magic numbers
const float eHys = 0.1f * 0.1f;
const float clipThreshold = 0.2f;
if(threadIdx.y == 0 )
{
float ls = 0.f;
for(int j=0; j < NBINS; j++)
{
ls += s_block[0][threadIdx.x][j] * s_block[0][threadIdx.x][j];
}
s_squares[threadIdx.x] = ls;
}
__syncthreads();
if(threadIdx.y == 0 && threadIdx.x == 0 )
{
s_squares[0] += s_squares[1] + s_squares[2] + s_squares[3];
}
__syncthreads();
	// we use rsqrtf (reciprocal sqrtf) because of CUDA peculiarities
float normalization = rsqrtf(s_squares[0]+epsilon);
// normalize and clip
if(threadIdx.y == 0 )
{
for(int j=0; j < NBINS; j++)
{
s_block[0][threadIdx.x][j] *= normalization;
s_block[0][threadIdx.x][j] = s_block[0][threadIdx.x][j] > clipThreshold ? clipThreshold : s_block[0][threadIdx.x][j];
}
}
// renormalize
if(threadIdx.y == 0 )
{
float ls = 0.f;
for(int j=0; j < NBINS; j++)
{
ls += s_block[0][threadIdx.x][j] * s_block[0][threadIdx.x][j];
}
s_squares[threadIdx.x] = ls;
}
__syncthreads();
if(threadIdx.y == 0 && threadIdx.x == 0 )
{
s_squares[0] += s_squares[1] + s_squares[2] + s_squares[3];
}
normalization = rsqrtf(s_squares[0]+eHys);
if(threadIdx.y == 0 )
{
for(int j=0; j < NBINS; j++)
{
s_block[0][threadIdx.x][j] *= normalization;
}
}
*/
// Save the calculated descriptor
if(threadIdx.y == 0 )
{
const int writeIdx = NBINS*4 * (blockIdx.y * gridDim.x + blockIdx.x);
for(int bin=0; bin < NBINS; bin++)
{
//printf("In saving part\n");
blocks_desc_d[writeIdx + threadIdx.x*NBINS + bin] = s_block[0][threadIdx.x][bin];
if(writeIdx + threadIdx.x*NBINS + bin == 0)
printf("Value: %f\n",blocks_desc_d[writeIdx + threadIdx.x*NBINS + bin]);
}
}
}
// This function is called through C++, therefore using extern notation
extern "C" void gradient_kernel_caller(float* img_h, float* mag_h, float* hog_descriptor, int rows, int cols)
{
float* img_d, *mag_d, *theta_d;
// Allocate GPU memory
cudaMalloc((void **) &img_d, rows*cols*sizeof(float));
cudaMalloc((void **) &mag_d, rows*cols*sizeof(float));
cudaMalloc((void **) &theta_d, rows*cols*sizeof(float));
//int i;
//Copy the image to GPU (device)
cudaMemcpy(img_d, img_h, rows*cols*sizeof(float), cudaMemcpyHostToDevice);
const int bl_size_x = 16;
const int bl_size_y = 16;
// Threads and grid size
dim3 threads(bl_size_x, bl_size_y);
dim3 grid((int)ceil(rows/(float)bl_size_x), (int)ceil(cols/(float)bl_size_y));
// Calling the kernel to calculate Gradient
gradient_kernel<<<grid,threads>>>(img_d, mag_d, theta_d, rows, cols);
cudaMemcpy(mag_h, mag_d, rows*cols*sizeof(float), cudaMemcpyDeviceToHost);
// cudaMemcpy(theta_h, theta_d, rows*cols*sizeof(float), cudaMemcpyDeviceToHost);
float* blocks_desc_d;
const int nBlocks = ((rows/8)-1)*((cols/8)-1);
const int total_blocks_size = nBlocks * HOG_BLOCK_CELLS_X * HOG_BLOCK_CELLS_Y * NBINS * sizeof(float);
cudaMalloc((void**)&blocks_desc_d, total_blocks_size);
dim3 dimGrid;
dimGrid.x = (int)floor(cols/8.f)-1;
dimGrid.y = (int)floor(rows/8.f)-1;
dim3 dimBlock(4,8);
printf("Grid: %d\t%d\n", dimBlock.x, dimBlock.y);
// Calling the kernel to calculate HOG descriptors
d_compute_desc_kernel<<<dimGrid, dimBlock>>>(mag_d, theta_d, blocks_desc_d, rows, cols);
cudaMemcpy(hog_descriptor, blocks_desc_d, total_blocks_size, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
cudaPeekAtLastError();
}
|
37ab7274f22704da8b5214443f5db11c32d4d4d5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel2_yvel_plus_4_right;
int xdim0_update_halo_kernel2_yvel_plus_4_right_h = -1;
__constant__ int ydim0_update_halo_kernel2_yvel_plus_4_right;
int ydim0_update_halo_kernel2_yvel_plus_4_right_h = -1;
__constant__ int xdim1_update_halo_kernel2_yvel_plus_4_right;
int xdim1_update_halo_kernel2_yvel_plus_4_right_h = -1;
__constant__ int ydim1_update_halo_kernel2_yvel_plus_4_right;
int ydim1_update_halo_kernel2_yvel_plus_4_right_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel2_yvel_plus_4_right*(y)+xdim0_update_halo_kernel2_yvel_plus_4_right*ydim0_update_halo_kernel2_yvel_plus_4_right*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel2_yvel_plus_4_right*(y)+xdim1_update_halo_kernel2_yvel_plus_4_right*ydim1_update_halo_kernel2_yvel_plus_4_right*(z))
//user function
__device__
inline void update_halo_kernel2_yvel_plus_4_right_gpu(double *yvel0, double *yvel1, const int* fields)
{
if(fields[FIELD_YVEL0] == 1) yvel0[OPS_ACC0(0,0,0)] = yvel0[OPS_ACC0(-4,0,0)];
if(fields[FIELD_YVEL1] == 1) yvel1[OPS_ACC1(0,0,0)] = yvel1[OPS_ACC1(-4,0,0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
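// Auto-generated wrapper kernel: each thread maps to one (x, y, z) point of the
// halo region, offsets the dat pointers to that point, and invokes the user
// function, which copies yvel values from 4 cells to the left into the right halo.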
__global__ void ops_update_halo_kernel2_yvel_plus_4_right(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_update_halo_kernel2_yvel_plus_4_right + idx_z * 1*1 * xdim0_update_halo_kernel2_yvel_plus_4_right * ydim0_update_halo_kernel2_yvel_plus_4_right;
arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_update_halo_kernel2_yvel_plus_4_right + idx_z * 1*1 * xdim1_update_halo_kernel2_yvel_plus_4_right * ydim1_update_halo_kernel2_yvel_plus_4_right;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel2_yvel_plus_4_right_gpu(arg0, arg1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel2_yvel_plus_4_right(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel2_yvel_plus_4_right_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[3] = { arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,3,range,41)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(41,"update_halo_kernel2_yvel_plus_4_right");
OPS_kernels[41].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel2_yvel_plus_4_right_h || ydim0 != ydim0_update_halo_kernel2_yvel_plus_4_right_h || xdim1 != xdim1_update_halo_kernel2_yvel_plus_4_right_h || ydim1 != ydim1_update_halo_kernel2_yvel_plus_4_right_h) {
hipMemcpyToSymbol( xdim0_update_halo_kernel2_yvel_plus_4_right, &xdim0, sizeof(int) );
xdim0_update_halo_kernel2_yvel_plus_4_right_h = xdim0;
hipMemcpyToSymbol( ydim0_update_halo_kernel2_yvel_plus_4_right, &ydim0, sizeof(int) );
ydim0_update_halo_kernel2_yvel_plus_4_right_h = ydim0;
hipMemcpyToSymbol( xdim1_update_halo_kernel2_yvel_plus_4_right, &xdim1, sizeof(int) );
xdim1_update_halo_kernel2_yvel_plus_4_right_h = xdim1;
hipMemcpyToSymbol( ydim1_update_halo_kernel2_yvel_plus_4_right, &ydim1, sizeof(int) );
ydim1_update_halo_kernel2_yvel_plus_4_right_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[41].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
hipLaunchKernelGGL(( ops_update_halo_kernel2_yvel_plus_4_right), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[41].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[41].mpi_time += t2-t1;
OPS_kernels[41].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[41].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel2_yvel_plus_4_right(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 41;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 41;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg*)malloc(3*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel2_yvel_plus_4_right_execute;
if (OPS_diags > 1) {
ops_timing_realloc(41,"update_halo_kernel2_yvel_plus_4_right");
}
ops_enqueue_kernel(desc);
}
#endif
| 37ab7274f22704da8b5214443f5db11c32d4d4d5.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel2_yvel_plus_4_right;
int xdim0_update_halo_kernel2_yvel_plus_4_right_h = -1;
__constant__ int ydim0_update_halo_kernel2_yvel_plus_4_right;
int ydim0_update_halo_kernel2_yvel_plus_4_right_h = -1;
__constant__ int xdim1_update_halo_kernel2_yvel_plus_4_right;
int xdim1_update_halo_kernel2_yvel_plus_4_right_h = -1;
__constant__ int ydim1_update_halo_kernel2_yvel_plus_4_right;
int ydim1_update_halo_kernel2_yvel_plus_4_right_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel2_yvel_plus_4_right*(y)+xdim0_update_halo_kernel2_yvel_plus_4_right*ydim0_update_halo_kernel2_yvel_plus_4_right*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel2_yvel_plus_4_right*(y)+xdim1_update_halo_kernel2_yvel_plus_4_right*ydim1_update_halo_kernel2_yvel_plus_4_right*(z))
//user function
__device__
inline void update_halo_kernel2_yvel_plus_4_right_gpu(double *yvel0, double *yvel1, const int* fields)
{
if(fields[FIELD_YVEL0] == 1) yvel0[OPS_ACC0(0,0,0)] = yvel0[OPS_ACC0(-4,0,0)];
if(fields[FIELD_YVEL1] == 1) yvel1[OPS_ACC1(0,0,0)] = yvel1[OPS_ACC1(-4,0,0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel2_yvel_plus_4_right(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_update_halo_kernel2_yvel_plus_4_right + idx_z * 1*1 * xdim0_update_halo_kernel2_yvel_plus_4_right * ydim0_update_halo_kernel2_yvel_plus_4_right;
arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_update_halo_kernel2_yvel_plus_4_right + idx_z * 1*1 * xdim1_update_halo_kernel2_yvel_plus_4_right * ydim1_update_halo_kernel2_yvel_plus_4_right;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel2_yvel_plus_4_right_gpu(arg0, arg1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel2_yvel_plus_4_right(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel2_yvel_plus_4_right_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[3] = { arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,3,range,41)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(41,"update_halo_kernel2_yvel_plus_4_right");
OPS_kernels[41].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel2_yvel_plus_4_right_h || ydim0 != ydim0_update_halo_kernel2_yvel_plus_4_right_h || xdim1 != xdim1_update_halo_kernel2_yvel_plus_4_right_h || ydim1 != ydim1_update_halo_kernel2_yvel_plus_4_right_h) {
cudaMemcpyToSymbol( xdim0_update_halo_kernel2_yvel_plus_4_right, &xdim0, sizeof(int) );
xdim0_update_halo_kernel2_yvel_plus_4_right_h = xdim0;
cudaMemcpyToSymbol( ydim0_update_halo_kernel2_yvel_plus_4_right, &ydim0, sizeof(int) );
ydim0_update_halo_kernel2_yvel_plus_4_right_h = ydim0;
cudaMemcpyToSymbol( xdim1_update_halo_kernel2_yvel_plus_4_right, &xdim1, sizeof(int) );
xdim1_update_halo_kernel2_yvel_plus_4_right_h = xdim1;
cudaMemcpyToSymbol( ydim1_update_halo_kernel2_yvel_plus_4_right, &ydim1, sizeof(int) );
ydim1_update_halo_kernel2_yvel_plus_4_right_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[41].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
ops_update_halo_kernel2_yvel_plus_4_right<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[41].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[41].mpi_time += t2-t1;
OPS_kernels[41].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[41].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel2_yvel_plus_4_right(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 41;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 41;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg*)malloc(3*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel2_yvel_plus_4_right_execute;
if (OPS_diags > 1) {
ops_timing_realloc(41,"update_halo_kernel2_yvel_plus_4_right");
}
ops_enqueue_kernel(desc);
}
#endif
|
c47ae64dd135970b353b51e626bc54bf16b68c79.hip | // !!! This is a file automatically generated by hipify!!!
/**
* @brief Benchmarks for ragged_ops.
*
* @copyright
* Copyright (c) 2020 Xiaomi Corporation (authors: Fangjun Kuang)
*
* @copyright
* See LICENSE for clarification regarding multiple authors
*/
#include <cstdlib>
#include "k2/csrc/benchmark/benchmark.h"
#include "k2/csrc/fsa_utils.h"
#include "k2/csrc/ragged_ops.h"
namespace k2 {
static BenchmarkStat BenchmarkGetTransposeReordering(int32_t dim,
DeviceType device_type) {
ContextPtr context;
if (device_type == kCpu) {
context = GetCpuContext();
} else {
K2_CHECK_EQ(device_type, kCuda);
context = GetCudaContext();
}
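  // Use fewer iterations for larger problems so each benchmark stays roughly
  // bounded in wall-clock time, capped at 100 iterations.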
  int32_t num_iter = std::min(100, 10000 / dim);
int32_t min_num_fsas = dim;
int32_t max_num_fsas = dim * 2;
bool acyclic = false;
int32_t max_symbol = 100;
int32_t min_num_arcs = min_num_fsas * 10;
int32_t max_num_arcs = max_num_fsas * 20;
FsaVec fsas = RandomFsaVec(min_num_fsas, max_num_fsas, acyclic, max_symbol,
min_num_arcs, max_num_arcs);
fsas = fsas.To(context);
Array1<int32_t> dest_states = GetDestStates(fsas, true);
Ragged<int32_t> dest_states_tensor(fsas.shape, dest_states);
int32_t num_fsas = fsas.TotSize(0);
int32_t num_states = fsas.TotSize(1);
int32_t num_arcs = fsas.TotSize(2);
BenchmarkStat stat;
stat.op_name = "GetTransposeReordering_" + std::to_string(num_fsas) + "_" +
std::to_string(num_states) + "_" + std::to_string(num_arcs);
stat.num_iter = num_iter;
stat.problem_size = dim;
stat.dtype_name = TraitsOf(DtypeOf<int32_t>::dtype).Name();
stat.device_type = device_type;
stat.eplased_per_iter =
BenchmarkOp(num_iter, context, &GetTransposeReordering,
dest_states_tensor, num_states);
stat.eplased_per_iter *= 1e6; // from seconds to microseconds
return stat;
}
static void RegisterBenchmarkGetTransposeReordering(DeviceType device_type) {
std::vector<int32_t> problems_sizes = {10, 20, 30, 50, 100, 200, 300, 500};
for (auto s : problems_sizes) {
std::string name =
GenerateBenchmarkName<int32_t>("GetTransposeReordering", device_type);
RegisterBenchmark(name, [s, device_type]() -> BenchmarkStat {
return BenchmarkGetTransposeReordering(s, device_type);
});
}
}
static void RunRaggedOpsBenchmark() {
PrintEnvironmentInfo();
RegisterBenchmarkGetTransposeReordering(kCpu);
RegisterBenchmarkGetTransposeReordering(kCuda);
// Users can set a regular expression via environment
// variable `K2_BENCHMARK_FILTER` such that only benchmarks
// with name matching the pattern are candidates to run.
const char *filter = std::getenv("K2_BENCHMARK_FILTER");
if (filter != nullptr) FilterRegisteredBenchmarks(filter);
std::vector<BenchmarkRun> results = RunBechmarks();
std::cout << BenchmarkRun::GetFieldsName() << "\n";
for (const auto &r : results) {
std::cout << r << "\n";
}
}
} // namespace k2
int main() {
k2::RunRaggedOpsBenchmark();
return 0;
}
| c47ae64dd135970b353b51e626bc54bf16b68c79.cu | /**
* @brief Benchmarks for ragged_ops.
*
* @copyright
* Copyright (c) 2020 Xiaomi Corporation (authors: Fangjun Kuang)
*
* @copyright
* See LICENSE for clarification regarding multiple authors
*/
#include <cstdlib>
#include "k2/csrc/benchmark/benchmark.h"
#include "k2/csrc/fsa_utils.h"
#include "k2/csrc/ragged_ops.h"
namespace k2 {
static BenchmarkStat BenchmarkGetTransposeReordering(int32_t dim,
DeviceType device_type) {
ContextPtr context;
if (device_type == kCpu) {
context = GetCpuContext();
} else {
K2_CHECK_EQ(device_type, kCuda);
context = GetCudaContext();
}
int32_t num_iter = std::min(100, 10000 / dim);
int32_t min_num_fsas = dim;
int32_t max_num_fsas = dim * 2;
bool acyclic = false;
int32_t max_symbol = 100;
int32_t min_num_arcs = min_num_fsas * 10;
int32_t max_num_arcs = max_num_fsas * 20;
FsaVec fsas = RandomFsaVec(min_num_fsas, max_num_fsas, acyclic, max_symbol,
min_num_arcs, max_num_arcs);
fsas = fsas.To(context);
Array1<int32_t> dest_states = GetDestStates(fsas, true);
Ragged<int32_t> dest_states_tensor(fsas.shape, dest_states);
int32_t num_fsas = fsas.TotSize(0);
int32_t num_states = fsas.TotSize(1);
int32_t num_arcs = fsas.TotSize(2);
BenchmarkStat stat;
stat.op_name = "GetTransposeReordering_" + std::to_string(num_fsas) + "_" +
std::to_string(num_states) + "_" + std::to_string(num_arcs);
stat.num_iter = num_iter;
stat.problem_size = dim;
stat.dtype_name = TraitsOf(DtypeOf<int32_t>::dtype).Name();
stat.device_type = device_type;
stat.eplased_per_iter =
BenchmarkOp(num_iter, context, &GetTransposeReordering,
dest_states_tensor, num_states);
stat.eplased_per_iter *= 1e6; // from seconds to microseconds
return stat;
}
static void RegisterBenchmarkGetTransposeReordering(DeviceType device_type) {
std::vector<int32_t> problems_sizes = {10, 20, 30, 50, 100, 200, 300, 500};
for (auto s : problems_sizes) {
std::string name =
GenerateBenchmarkName<int32_t>("GetTransposeReordering", device_type);
RegisterBenchmark(name, [s, device_type]() -> BenchmarkStat {
return BenchmarkGetTransposeReordering(s, device_type);
});
}
}
static void RunRaggedOpsBenchmark() {
PrintEnvironmentInfo();
RegisterBenchmarkGetTransposeReordering(kCpu);
RegisterBenchmarkGetTransposeReordering(kCuda);
// Users can set a regular expression via environment
// variable `K2_BENCHMARK_FILTER` such that only benchmarks
// with name matching the pattern are candidates to run.
const char *filter = std::getenv("K2_BENCHMARK_FILTER");
if (filter != nullptr) FilterRegisteredBenchmarks(filter);
std::vector<BenchmarkRun> results = RunBechmarks();
std::cout << BenchmarkRun::GetFieldsName() << "\n";
for (const auto &r : results) {
std::cout << r << "\n";
}
}
} // namespace k2
int main() {
k2::RunRaggedOpsBenchmark();
return 0;
}
|
9b10fb427c7b3e35300b2285278d92940ddb8204.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
* Yang Wang
* Department of ECE
* University of Toronto
*/
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <math.h>
__global__ void ADD(float * A, float*O, int N, float x)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N)
{
O[i] = A[i] + x;
}
}
int main(void)
{
int N = 32 * 1024 * 256;
double input = 32;
float x = 10.0;
printf("%s\n%s\n%s\n", "FIRSTNAME: Yang", "LASTNAME: Wang",
"E-MAIL: [email protected]");
printf("%-15s%-15s%-15s%-15s%-15s\n", "Elements(M)", "Block_size", "CPUtoGPU(ms)",
"Kernel(ms)", "GPUtoCPU(ms)");
for(double threadsPerBlock = 16; threadsPerBlock < pow(2.0, 15); threadsPerBlock *= 2)
{
size_t size = N * sizeof(float);
float * hA = (float *) malloc(size);
float * hO = (float *) malloc(size);
float * dA;
hipMalloc((void**) &dA, size);
float * dO;
hipMalloc((void**) &dO, size);
hipEvent_t start, start_1, start_2;
hipEvent_t end, end_1, end_2;
hipEventCreate(&start);
hipEventCreate(&start_1);
hipEventCreate(&start_2);
hipEventCreate(&end);
hipEventCreate(&end_1);
hipEventCreate(&end_2);
for (int i = 0; i < N; i++) {
hA[i] = rand() / (float) RAND_MAX;
}
hipEventRecord(start);
hipMemcpy(dA, hA, size, hipMemcpyHostToDevice);
hipEventRecord(end);
hipEventSynchronize(end);
float eTime = 0;
hipEventElapsedTime(&eTime, start, end);
int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;
hipEventRecord(start_1);
hipLaunchKernelGGL(( ADD), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dA, dO, N,x);
hipEventRecord(end_1);
hipDeviceSynchronize();
hipEventSynchronize(end_1);
float eTime1 = 0;
hipEventElapsedTime(&eTime1, start_1, end_1);
hipEventRecord(start_2);
hipMemcpy(hO, dO, size, hipMemcpyDeviceToHost);
hipEventRecord(end_2);
hipEventSynchronize(end_2);
float eTime2 = 0;
hipEventElapsedTime(&eTime2, start_2, end_2);
hipFree(dA);
hipFree(dO);
free(hA);
free(hO);
hipEventDestroy(start);
hipEventDestroy(end);
hipEventDestroy(start_1);
hipEventDestroy(end_1);
hipEventDestroy(start_2);
hipEventDestroy(end_2);
printf("%-15.0f%-15.0f%-15f%-15f%-15f\n", input, threadsPerBlock, eTime,
eTime1, eTime2);
}
}
| 9b10fb427c7b3e35300b2285278d92940ddb8204.cu | /**
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
* Yang Wang
* Department of ECE
* University of Toronto
*/
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <math.h>
__global__ void ADD(float * A, float*O, int N, float x)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N)
{
O[i] = A[i] + x;
}
}
int main(void)
{
int N = 32 * 1024 * 256;
double input = 32;
float x = 10.0;
printf("%s\n%s\n%s\n", "FIRSTNAME: Yang", "LASTNAME: Wang",
"E-MAIL: [email protected]");
printf("%-15s%-15s%-15s%-15s%-15s\n", "Elements(M)", "Block_size", "CPUtoGPU(ms)",
"Kernel(ms)", "GPUtoCPU(ms)");
for(double threadsPerBlock = 16; threadsPerBlock < pow(2.0, 15); threadsPerBlock *= 2)
{
size_t size = N * sizeof(float);
float * hA = (float *) malloc(size);
float * hO = (float *) malloc(size);
float * dA;
cudaMalloc((void**) &dA, size);
float * dO;
cudaMalloc((void**) &dO, size);
cudaEvent_t start, start_1, start_2;
cudaEvent_t end, end_1, end_2;
cudaEventCreate(&start);
cudaEventCreate(&start_1);
cudaEventCreate(&start_2);
cudaEventCreate(&end);
cudaEventCreate(&end_1);
cudaEventCreate(&end_2);
for (int i = 0; i < N; i++) {
hA[i] = rand() / (float) RAND_MAX;
}
cudaEventRecord(start);
cudaMemcpy(dA, hA, size, cudaMemcpyHostToDevice);
cudaEventRecord(end);
cudaEventSynchronize(end);
float eTime = 0;
cudaEventElapsedTime(&eTime, start, end);
int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;
cudaEventRecord(start_1);
ADD<<<blocksPerGrid, threadsPerBlock>>>(dA, dO, N,x);
cudaEventRecord(end_1);
cudaDeviceSynchronize();
cudaEventSynchronize(end_1);
float eTime1 = 0;
cudaEventElapsedTime(&eTime1, start_1, end_1);
cudaEventRecord(start_2);
cudaMemcpy(hO, dO, size, cudaMemcpyDeviceToHost);
cudaEventRecord(end_2);
cudaEventSynchronize(end_2);
float eTime2 = 0;
cudaEventElapsedTime(&eTime2, start_2, end_2);
cudaFree(dA);
cudaFree(dO);
free(hA);
free(hO);
cudaEventDestroy(start);
cudaEventDestroy(end);
cudaEventDestroy(start_1);
cudaEventDestroy(end_1);
cudaEventDestroy(start_2);
cudaEventDestroy(end_2);
printf("%-15.0f%-15.0f%-15f%-15f%-15f\n", input, threadsPerBlock, eTime,
eTime1, eTime2);
}
}
|
31cb007cccf76dd110b1476dd6712baa90d93e6f.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include <test_utils.h>
#include <cuda_utils.cuh>
#include <iostream>
#include <vector>
#include "selection/knn.cuh"
namespace MLCommon {
namespace Selection {
/**
*
* NOTE: Not exhaustively testing the kNN implementation since
* we are using FAISS for this. Just testing API to verify the
* knn.cu class is accepting inputs and providing outputs as
* expected.
*/
template <typename T>
class KNNTest : public ::testing::Test {
protected:
void basicTest() {
auto alloc = std::make_shared<defaultDeviceAllocator>();
// Allocate input
allocate(d_train_inputs, n * d);
// Allocate reference arrays
allocate<long>(d_ref_I, n * n);
allocate(d_ref_D, n * n);
// Allocate predicted arrays
allocate<long>(d_pred_I, n * n);
allocate(d_pred_D, n * n);
// make testdata on host
std::vector<T> h_train_inputs = {1.0, 50.0, 51.0};
h_train_inputs.resize(n);
updateDevice(d_train_inputs, h_train_inputs.data(), n * d, 0);
std::vector<T> h_res_D = {0.0, 49.0, 50.0, 0.0, 1.0, 49.0, 0.0, 1.0, 50.0};
h_res_D.resize(n * n);
updateDevice(d_ref_D, h_res_D.data(), n * n, 0);
std::vector<long> h_res_I = {0, 1, 2, 1, 2, 0, 2, 1, 0};
h_res_I.resize(n * n);
updateDevice<long>(d_ref_I, h_res_I.data(), n * n, 0);
float **ptrs = new float *[1];
int *sizes = new int[1];
ptrs[0] = d_train_inputs;
sizes[0] = n;
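    // A single index partition holds all n training points, and the same points
    // are used as queries, so each row's nearest neighbour is the point itself
    // (distance 0) followed by the remaining points in increasing distance.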
hipStream_t stream;
CUDA_CHECK(hipStreamCreate(&stream));
brute_force_knn(ptrs, sizes, 1, d, d_train_inputs, n, d_pred_I, d_pred_D, n,
alloc, stream);
CUDA_CHECK(hipStreamDestroy(stream));
}
void SetUp() override { basicTest(); }
void TearDown() override {
CUDA_CHECK(hipFree(d_train_inputs));
CUDA_CHECK(hipFree(d_pred_I));
CUDA_CHECK(hipFree(d_pred_D));
CUDA_CHECK(hipFree(d_ref_I));
CUDA_CHECK(hipFree(d_ref_D));
}
protected:
T *d_train_inputs;
int n = 3;
int d = 1;
long *d_pred_I;
T *d_pred_D;
long *d_ref_I;
T *d_ref_D;
};
typedef KNNTest<float> KNNTestF;
TEST_F(KNNTestF, Fit) {
ASSERT_TRUE(devArrMatch(d_ref_D, d_pred_D, n * n, Compare<float>()));
ASSERT_TRUE(devArrMatch(d_ref_I, d_pred_I, n * n, Compare<long>()));
}
}; // end namespace Selection
}; // namespace MLCommon
| 31cb007cccf76dd110b1476dd6712baa90d93e6f.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include <test_utils.h>
#include <cuda_utils.cuh>
#include <iostream>
#include <vector>
#include "selection/knn.cuh"
namespace MLCommon {
namespace Selection {
/**
*
* NOTE: Not exhaustively testing the kNN implementation since
* we are using FAISS for this. Just testing API to verify the
* knn.cu class is accepting inputs and providing outputs as
* expected.
*/
template <typename T>
class KNNTest : public ::testing::Test {
protected:
void basicTest() {
auto alloc = std::make_shared<defaultDeviceAllocator>();
// Allocate input
allocate(d_train_inputs, n * d);
// Allocate reference arrays
allocate<long>(d_ref_I, n * n);
allocate(d_ref_D, n * n);
// Allocate predicted arrays
allocate<long>(d_pred_I, n * n);
allocate(d_pred_D, n * n);
// make testdata on host
std::vector<T> h_train_inputs = {1.0, 50.0, 51.0};
h_train_inputs.resize(n);
updateDevice(d_train_inputs, h_train_inputs.data(), n * d, 0);
std::vector<T> h_res_D = {0.0, 49.0, 50.0, 0.0, 1.0, 49.0, 0.0, 1.0, 50.0};
h_res_D.resize(n * n);
updateDevice(d_ref_D, h_res_D.data(), n * n, 0);
std::vector<long> h_res_I = {0, 1, 2, 1, 2, 0, 2, 1, 0};
h_res_I.resize(n * n);
updateDevice<long>(d_ref_I, h_res_I.data(), n * n, 0);
float **ptrs = new float *[1];
int *sizes = new int[1];
ptrs[0] = d_train_inputs;
sizes[0] = n;
cudaStream_t stream;
CUDA_CHECK(cudaStreamCreate(&stream));
brute_force_knn(ptrs, sizes, 1, d, d_train_inputs, n, d_pred_I, d_pred_D, n,
alloc, stream);
CUDA_CHECK(cudaStreamDestroy(stream));
}
void SetUp() override { basicTest(); }
void TearDown() override {
CUDA_CHECK(cudaFree(d_train_inputs));
CUDA_CHECK(cudaFree(d_pred_I));
CUDA_CHECK(cudaFree(d_pred_D));
CUDA_CHECK(cudaFree(d_ref_I));
CUDA_CHECK(cudaFree(d_ref_D));
}
protected:
T *d_train_inputs;
int n = 3;
int d = 1;
long *d_pred_I;
T *d_pred_D;
long *d_ref_I;
T *d_ref_D;
};
typedef KNNTest<float> KNNTestF;
TEST_F(KNNTestF, Fit) {
ASSERT_TRUE(devArrMatch(d_ref_D, d_pred_D, n * n, Compare<float>()));
ASSERT_TRUE(devArrMatch(d_ref_I, d_pred_I, n * n, Compare<long>()));
}
}; // end namespace Selection
}; // namespace MLCommon
|
e5114714d741b119c947be2ecb74640ccb10d845.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void _bcnn_forward_softmax_layer_kernel(int n, int batch, float *input, float *output) {
float sum = 0.f;
float maxf = -INFINITY;
int b = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
if (b >= batch) {
return;
}
for (int i = 0; i < n; ++i) {
    float val = input[i + b * n];
maxf = (val > maxf) ? val : maxf;
}
for (int i = 0; i < n; ++i) {
sum += exp(input[i + b * n] - maxf);
}
sum = (sum != 0) ? maxf + log(sum) : maxf - 100.f;
for (int i = 0; i < n; ++i) {
output[i + b * n] = exp(input[i + b * n] - sum);
}
} | e5114714d741b119c947be2ecb74640ccb10d845.cu | #include "includes.h"
__global__ void _bcnn_forward_softmax_layer_kernel(int n, int batch, float *input, float *output) {
float sum = 0.f;
float maxf = -INFINITY;
int b = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
if (b >= batch) {
return;
}
for (int i = 0; i < n; ++i) {
    float val = input[i + b * n];
maxf = (val > maxf) ? val : maxf;
}
for (int i = 0; i < n; ++i) {
sum += exp(input[i + b * n] - maxf);
}
sum = (sum != 0) ? maxf + log(sum) : maxf - 100.f;
for (int i = 0; i < n; ++i) {
output[i + b * n] = exp(input[i + b * n] - sum);
}
} |
c3096150c8f963a8f67e696845d0f9629e76e24c.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "zeros.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *field = NULL;
    hipMalloc(&field, XSIZE*YSIZE*sizeof(double));
int n = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
zeros), dim3(gridBlock),dim3(threadBlock), 0, 0, field,n);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
zeros), dim3(gridBlock),dim3(threadBlock), 0, 0, field,n);
}
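    // Time 1000 back-to-back launches. Launches are asynchronous and there is no
    // device synchronization before reading the clock again, so the figure mostly
    // reflects launch/queueing overhead rather than kernel execution time.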
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
zeros), dim3(gridBlock),dim3(threadBlock), 0, 0, field,n);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | c3096150c8f963a8f67e696845d0f9629e76e24c.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "zeros.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *field = NULL;
    cudaMalloc(&field, XSIZE*YSIZE*sizeof(double));
int n = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
zeros<<<gridBlock,threadBlock>>>(field,n);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
zeros<<<gridBlock,threadBlock>>>(field,n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
zeros<<<gridBlock,threadBlock>>>(field,n);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
76f5ab8cb2a7f1ce469c16f8efc7bcd0df470f33.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "./headers.h"
#include "./left_looking_kernel.cu"
int main()
{
FILE *fptr;
fptr = fopen("./input.txt", "r");
hipError_t err = hipSuccess;
int devCount;
hipGetDeviceCount(&devCount);
hipDeviceProp_t devp;
hipGetDeviceProperties(&devp, 0);
int INPUT_SIZE = 0;
fscanf(fptr, "%d", &INPUT_SIZE);
size_t size = INPUT_SIZE * INPUT_SIZE * (sizeof(float));
printf("Testing for matrix M [%dx%d]\n", INPUT_SIZE, INPUT_SIZE);
float *M = (float *)malloc(size);
if(M == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
printf("Reading input matrix: \n");
for(int i=0; i<INPUT_SIZE; i++)
{
for(int j=0; j<INPUT_SIZE; j++)
{
fscanf(fptr, "%f ", &M[i * INPUT_SIZE + j]);
}
}
// printf("Printing input matrix\n");
// for(int i=0; i<INPUT_SIZE; i++)
// {
// for(int j=0; j<INPUT_SIZE; j++)
// {
// printf("%f ", M[i * INPUT_SIZE + j]);
// }
// printf("\n");
// }
printf("\n\n");
float *d_M = NULL;
err = hipMalloc((void **)&d_M, size);
if(err != hipSuccess)
{
fprintf(stderr, "Failed to allocate matrix M on the CUDA device! (error code %s)\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Copy the matrix M from host memory to CUDA device\n\n");
err = hipMemcpy(d_M, M, size, hipMemcpyHostToDevice);
if(err != hipSuccess)
{
fprintf(stderr, "Failed to copy matrix M from host to device (error code %s)\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
dim3 grid(1, 1, 1);
dim3 block(TILE_SIZE, TILE_SIZE, 1);
// no of tiles in a column
int no_of_tiles = (INPUT_SIZE / TILE_SIZE) + (INPUT_SIZE % TILE_SIZE != 0); // ceil of (INPUT_SIZE / TILE_SIZE)
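    // Pick the kernel variant by shared-memory footprint: one tile when the
    // whole matrix is a single tile, (no_of_tiles + 2) tiles for the full
    // left-looking kernel when that fits in shared memory, otherwise a
    // lower-memory variant that keeps only 4 tiles resident. Each tile is
    // padded to TILE_SIZE x (TILE_SIZE + 1), presumably to avoid shared-memory
    // bank conflicts.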
if(TILE_SIZE == INPUT_SIZE)
{
hipLaunchKernelGGL(( left_looking_kernel), dim3(grid), dim3(block), 1 * TILE_SIZE * (TILE_SIZE + 1) * sizeof(float), 0, d_M, INPUT_SIZE);
}
else if((no_of_tiles + 2) * TILE_SIZE * (TILE_SIZE + 1) * sizeof(float) < devp.sharedMemPerBlock)
{
hipLaunchKernelGGL(( left_looking_kernel), dim3(grid), dim3(block), (no_of_tiles + 2) * TILE_SIZE * (TILE_SIZE + 1) * sizeof(float), 0, d_M, INPUT_SIZE);
}
else
{
hipLaunchKernelGGL(( left_looking_kernel_less_mem), dim3(grid), dim3(block), 4 * TILE_SIZE * (TILE_SIZE + 1) * sizeof(float), 0, d_M, INPUT_SIZE);
}
err = hipGetLastError();
if(err != hipSuccess)
{
fprintf(stderr, "Failed to launch CUDA kernel (error code %s)\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(M, d_M, size, hipMemcpyDeviceToHost);
if(err != hipSuccess)
{
fprintf(stderr, "Failed to copy the output matrix M from device to Host (error code %s)\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// printf("Printing output matrix\n");
// for(int i=0; i<INPUT_SIZE; i++)
// {
// for(int j=0; j<INPUT_SIZE; j++)
// {
// printf("%f ", M[i * INPUT_SIZE + j]);
// }
// printf("\n");
// }
err = hipFree(d_M);
if(err != hipSuccess)
{
fprintf(stderr, "Failed to free device matrix M (error code %s)\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
free(M);
err = hipDeviceReset();
if(err != hipSuccess)
{
fprintf(stderr, "Failed to deinitialize the CUDA device (error code %s)\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("DONE!\n");
} | 76f5ab8cb2a7f1ce469c16f8efc7bcd0df470f33.cu | #include "./headers.h"
#include "./left_looking_kernel.cu"
int main()
{
FILE *fptr;
fptr = fopen("./input.txt", "r");
cudaError_t err = cudaSuccess;
int devCount;
cudaGetDeviceCount(&devCount);
cudaDeviceProp devp;
cudaGetDeviceProperties(&devp, 0);
int INPUT_SIZE = 0;
fscanf(fptr, "%d", &INPUT_SIZE);
size_t size = INPUT_SIZE * INPUT_SIZE * (sizeof(float));
printf("Testing for matrix M [%dx%d]\n", INPUT_SIZE, INPUT_SIZE);
float *M = (float *)malloc(size);
if(M == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
printf("Reading input matrix: \n");
for(int i=0; i<INPUT_SIZE; i++)
{
for(int j=0; j<INPUT_SIZE; j++)
{
fscanf(fptr, "%f ", &M[i * INPUT_SIZE + j]);
}
}
// printf("Printing input matrix\n");
// for(int i=0; i<INPUT_SIZE; i++)
// {
// for(int j=0; j<INPUT_SIZE; j++)
// {
// printf("%f ", M[i * INPUT_SIZE + j]);
// }
// printf("\n");
// }
printf("\n\n");
float *d_M = NULL;
err = cudaMalloc((void **)&d_M, size);
if(err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate matrix M on the CUDA device! (error code %s)\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Copy the matrix M from host memory to CUDA device\n\n");
err = cudaMemcpy(d_M, M, size, cudaMemcpyHostToDevice);
if(err != cudaSuccess)
{
fprintf(stderr, "Failed to copy matrix M from host to device (error code %s)\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
dim3 grid(1, 1, 1);
dim3 block(TILE_SIZE, TILE_SIZE, 1);
// no of tiles in a column
int no_of_tiles = (INPUT_SIZE / TILE_SIZE) + (INPUT_SIZE % TILE_SIZE != 0); // ceil of (INPUT_SIZE / TILE_SIZE)
if(TILE_SIZE == INPUT_SIZE)
{
left_looking_kernel<<<grid, block, 1 * TILE_SIZE * (TILE_SIZE + 1) * sizeof(float)>>>(d_M, INPUT_SIZE);
}
else if((no_of_tiles + 2) * TILE_SIZE * (TILE_SIZE + 1) * sizeof(float) < devp.sharedMemPerBlock)
{
left_looking_kernel<<<grid, block, (no_of_tiles + 2) * TILE_SIZE * (TILE_SIZE + 1) * sizeof(float)>>>(d_M, INPUT_SIZE);
}
else
{
left_looking_kernel_less_mem<<<grid, block, 4 * TILE_SIZE * (TILE_SIZE + 1) * sizeof(float)>>>(d_M, INPUT_SIZE);
}
err = cudaGetLastError();
if(err != cudaSuccess)
{
fprintf(stderr, "Failed to launch CUDA kernel (error code %s)\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(M, d_M, size, cudaMemcpyDeviceToHost);
if(err != cudaSuccess)
{
fprintf(stderr, "Failed to copy the output matrix M from device to Host (error code %s)\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// printf("Printing output matrix\n");
// for(int i=0; i<INPUT_SIZE; i++)
// {
// for(int j=0; j<INPUT_SIZE; j++)
// {
// printf("%f ", M[i * INPUT_SIZE + j]);
// }
// printf("\n");
// }
err = cudaFree(d_M);
if(err != cudaSuccess)
{
fprintf(stderr, "Failed to free device matrix M (error code %s)\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
free(M);
err = cudaDeviceReset();
if(err != cudaSuccess)
{
fprintf(stderr, "Failed to deinitialize the CUDA device (error code %s)\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("DONE!\n");
} |
7d78869a887c36f2a81ed00e3395a249340a2923.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
void __global__ ifftshiftc(float2 *f, int N, int Ntheta, int Nz) {
int tx = blockDim.x * blockIdx.x + threadIdx.x;
int ty = blockDim.y * blockIdx.y + threadIdx.y;
int tz = blockDim.z * blockIdx.z + threadIdx.z;
if (tx >= N || ty >= Ntheta || tz >= Nz)
return;
int g = (1 - 2 * ((tx + 1) % 2));
int f_ind = tx + tz * N + ty * N * Nz;
f[f_ind].x *= g;
f[f_ind].y *= g;
}
void __global__ ifftshiftcmul(float2 *f, int N, int Ntheta, int Nz) {
int tx = blockDim.x * blockIdx.x + threadIdx.x;
int ty = blockDim.y * blockIdx.y + threadIdx.y;
int tz = blockDim.z * blockIdx.z + threadIdx.z;
if (tx >= N || ty >= Ntheta || tz >= Nz)
return;
int f_ind = tx + tz * N + ty * N * Nz;
f[f_ind].x *= -1;
f[f_ind].y *= -1;
}
void __global__ fftshiftc(float2 *f, int N, int Nz) {
int tx = blockDim.x * blockIdx.x + threadIdx.x;
int ty = blockDim.y * blockIdx.y + threadIdx.y;
int tz = blockDim.z * blockIdx.z + threadIdx.z;
if (tx >= N || ty >= N || tz >= Nz)
return;
int g = (1 - 2 * ((tx + 1) % 2)) * (1 - 2 * ((ty + 1) % 2));
f[tx + ty * N + tz * N * N].x *= g;
f[tx + ty * N + tz * N * N].y *= g;
}
void __global__ takeshift(float2 *shift, float c, int N) {
int tx = blockDim.x * blockIdx.x + threadIdx.x;
if (tx >= N)
return;
shift[tx].x = __cosf(2 * PI * c * (tx - N / 2.0) / N);
shift[tx].y = __sinf(2 * PI * c * (tx - N / 2.0) / N);
}
void __global__ shift(float2 *f, float2 *shift, int N, int Ntheta, int Nz) {
int tx = blockDim.x * blockIdx.x + threadIdx.x;
int ty = blockDim.y * blockIdx.y + threadIdx.y;
int tz = blockDim.z * blockIdx.z + threadIdx.z;
if (tx >= N || ty >= Ntheta || tz >= Nz)
return;
float cr = shift[tx].x;
float ci = shift[tx].y;
int f_ind = tx + tz * N + ty * N * Nz;
float2 f0;
f0.x = f[f_ind].x;
f0.y = f[f_ind].y;
f[f_ind].x = f0.x * cr - f0.y * ci;
f[f_ind].y = f0.x * ci + f0.y * cr;
}
void __global__ fftshiftc2d(float2 *f, int det, int ntheta) {
int tx = blockDim.x * blockIdx.x + threadIdx.x;
int ty = blockDim.y * blockIdx.y + threadIdx.y;
int tz = blockDim.z * blockIdx.z + threadIdx.z;
if (tx >= det || ty >= det || tz >= ntheta)
return;
int g = (1 - 2 * ((tx + 1) % 2))*(1 - 2 * ((ty + 1) % 2));
int f_ind = tx + ty * det + tz * det * det;
f[f_ind].x *= g;
f[f_ind].y *= g;
}
void __global__ fftshiftc3d(float2 *f, int n0, int n1, int n2) {
int tx = blockDim.x * blockIdx.x + threadIdx.x;
int ty = blockDim.y * blockIdx.y + threadIdx.y;
int tz = blockDim.z * blockIdx.z + threadIdx.z;
if (tx >= n0 || ty >= n1 || tz >= n2)
return;
int g = (1 - 2 * ((tx + 1) % 2)) * (1 - 2 * ((ty + 1) % 2))* (1 - 2 * ((tz + 1) % 2));
f[tx + ty * n0 + tz * n0 * n1].x *= g;
f[tx + ty * n0 + tz * n0 * n1].y *= g;
} | 7d78869a887c36f2a81ed00e3395a249340a2923.cu | void __global__ ifftshiftc(float2 *f, int N, int Ntheta, int Nz) {
int tx = blockDim.x * blockIdx.x + threadIdx.x;
int ty = blockDim.y * blockIdx.y + threadIdx.y;
int tz = blockDim.z * blockIdx.z + threadIdx.z;
if (tx >= N || ty >= Ntheta || tz >= Nz)
return;
int g = (1 - 2 * ((tx + 1) % 2));
int f_ind = tx + tz * N + ty * N * Nz;
f[f_ind].x *= g;
f[f_ind].y *= g;
}
void __global__ ifftshiftcmul(float2 *f, int N, int Ntheta, int Nz) {
int tx = blockDim.x * blockIdx.x + threadIdx.x;
int ty = blockDim.y * blockIdx.y + threadIdx.y;
int tz = blockDim.z * blockIdx.z + threadIdx.z;
if (tx >= N || ty >= Ntheta || tz >= Nz)
return;
int f_ind = tx + tz * N + ty * N * Nz;
f[f_ind].x *= -1;
f[f_ind].y *= -1;
}
void __global__ fftshiftc(float2 *f, int N, int Nz) {
int tx = blockDim.x * blockIdx.x + threadIdx.x;
int ty = blockDim.y * blockIdx.y + threadIdx.y;
int tz = blockDim.z * blockIdx.z + threadIdx.z;
if (tx >= N || ty >= N || tz >= Nz)
return;
int g = (1 - 2 * ((tx + 1) % 2)) * (1 - 2 * ((ty + 1) % 2));
f[tx + ty * N + tz * N * N].x *= g;
f[tx + ty * N + tz * N * N].y *= g;
}
void __global__ takeshift(float2 *shift, float c, int N) {
int tx = blockDim.x * blockIdx.x + threadIdx.x;
if (tx >= N)
return;
shift[tx].x = __cosf(2 * PI * c * (tx - N / 2.0) / N);
shift[tx].y = __sinf(2 * PI * c * (tx - N / 2.0) / N);
}
void __global__ shift(float2 *f, float2 *shift, int N, int Ntheta, int Nz) {
int tx = blockDim.x * blockIdx.x + threadIdx.x;
int ty = blockDim.y * blockIdx.y + threadIdx.y;
int tz = blockDim.z * blockIdx.z + threadIdx.z;
if (tx >= N || ty >= Ntheta || tz >= Nz)
return;
float cr = shift[tx].x;
float ci = shift[tx].y;
int f_ind = tx + tz * N + ty * N * Nz;
float2 f0;
f0.x = f[f_ind].x;
f0.y = f[f_ind].y;
f[f_ind].x = f0.x * cr - f0.y * ci;
f[f_ind].y = f0.x * ci + f0.y * cr;
}
void __global__ fftshiftc2d(float2 *f, int det, int ntheta) {
int tx = blockDim.x * blockIdx.x + threadIdx.x;
int ty = blockDim.y * blockIdx.y + threadIdx.y;
int tz = blockDim.z * blockIdx.z + threadIdx.z;
if (tx >= det || ty >= det || tz >= ntheta)
return;
int g = (1 - 2 * ((tx + 1) % 2))*(1 - 2 * ((ty + 1) % 2));
int f_ind = tx + ty * det + tz * det * det;
f[f_ind].x *= g;
f[f_ind].y *= g;
}
void __global__ fftshiftc3d(float2 *f, int n0, int n1, int n2) {
int tx = blockDim.x * blockIdx.x + threadIdx.x;
int ty = blockDim.y * blockIdx.y + threadIdx.y;
int tz = blockDim.z * blockIdx.z + threadIdx.z;
if (tx >= n0 || ty >= n1 || tz >= n2)
return;
int g = (1 - 2 * ((tx + 1) % 2)) * (1 - 2 * ((ty + 1) % 2))* (1 - 2 * ((tz + 1) % 2));
f[tx + ty * n0 + tz * n0 * n1].x *= g;
f[tx + ty * n0 + tz * n0 * n1].y *= g;
} |
4d7bbb36278db2fe338fdd8a46dacb1ac6411ee6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@generated from magma_zmcsrcompressor_gpu.cu normal z -> d, Fri Jan 30 19:00:29 2015
@author Hartwig Anzt
*/
#include "common_magma.h"
#include "magmasparse.h"
#if (GPUSHMEM < 200)
#define BLOCK_SIZE1 256
#define BLOCK_SIZE2 1
#else
#define BLOCK_SIZE1 256
#define BLOCK_SIZE2 1
#endif
// copy nonzeros into new structure
__global__ void
magma_dmcsrgpu_kernel1( int num_rows,
double *A_val,
magma_index_t *A_rowptr,
magma_index_t *A_colind,
double *B_val,
magma_index_t *B_rowptr,
magma_index_t *B_colind ){
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j;
if(row<num_rows){
double zero = MAGMA_D_ZERO;
int start = A_rowptr[ row ];
int new_location = start;
int end = A_rowptr[ row+1 ];
for( j=start; j<end; j++ ){
if( A_val[j] != zero ){
// B_val[new_location] = A_val[j];
// B_colind[new_location] = A_colind[j];
new_location++;
}
}
        // this is not a correct row pointer! this is the nnz in this row!
B_rowptr[ row ] = new_location-start;
}
}
// generate a valid rowpointer
__global__ void
magma_dmcsrgpu_kernel2( int num_rows,
magma_index_t *B_rowptr,
magma_index_t *A_rowptr ){
int idx = blockIdx.x*blockDim.x+threadIdx.x;
int j, nnz = 0;
if( idx == 0 ){
A_rowptr[ 0 ] = nnz;
for( j=0; j<num_rows; j++ ){
nnz+=B_rowptr[ j ];
A_rowptr[ j+1 ] = nnz;
}
}
}
// copy new structure into original matrix
__global__ void
magma_dmcsrgpu_kernel3( int num_rows,
double *B_val,
magma_index_t *B_rowptr,
magma_index_t *B_colind,
magma_index_t *B2_rowptr,
double *A_val,
magma_index_t *A_rowptr,
magma_index_t *A_colind
){
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j, new_location;
if(row<num_rows){
new_location = A_rowptr[ row ];
int start = B2_rowptr[ row ];
int end = B2_rowptr[ row+1 ];
double zero = MAGMA_D_ZERO;
for( j=start; j<end; j++ ){
if( A_val[j] != zero ){
B_val[new_location] = A_val[j];
B_colind[new_location] = A_colind[j];
new_location++;
}
// A_val[ j ] = B_val[ j ];
// A_colind[ j ] = B_colind[ j ];
}
}
}
/**
Purpose
-------
Removes zeros in a CSR matrix. This is a GPU implementation of the
CSR compressor.
Arguments
---------
@param
A magma_d_sparse_matrix*
input/output matrix
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_daux
********************************************************************/
extern "C" magma_int_t
magma_dmcsrcompressor_gpu(
magma_d_sparse_matrix *A,
magma_queue_t queue )
{
if ( A->memory_location == Magma_DEV && A->storage_type == Magma_CSR ) {
magma_int_t stat_cpu = 0, stat_dev = 0;
magma_d_sparse_matrix B, B2;
B.val = NULL;
B.col = NULL;
B.row = NULL;
B.rowidx = NULL;
B.blockinfo = NULL;
B.diag = NULL;
B.dval = NULL;
B.dcol = NULL;
B.drow = NULL;
B.drowidx = NULL;
B.ddiag = NULL;
B2.val = NULL;
B2.col = NULL;
B2.row = NULL;
B2.rowidx = NULL;
B2.blockinfo = NULL;
B2.diag = NULL;
B2.dval = NULL;
B2.dcol = NULL;
B2.drow = NULL;
B2.drowidx = NULL;
B2.ddiag = NULL;
stat_dev += magma_index_malloc( &B.drow, A->num_rows + 1 );
stat_dev += magma_index_malloc( &B2.drow, A->num_rows + 1 );
if( stat_dev != 0 ){
magma_d_mfree( &B, queue );
magma_d_mfree( &B2, queue );
return MAGMA_ERR_DEVICE_ALLOC;
}
magma_index_copyvector( (A->num_rows+1), A->drow, 1, B2.drow, 1 );
dim3 grid1( (A->num_rows+BLOCK_SIZE1-1)/BLOCK_SIZE1, 1, 1);
// copying the nonzeros into B and write in B.drow how many there are
hipLaunchKernelGGL(( magma_dmcsrgpu_kernel1), dim3(grid1), dim3(BLOCK_SIZE1), 0, queue ,
A->num_rows, A->dval, A->drow, A->dcol, B.dval, B.drow, B.dcol );
// correct the row pointer
dim3 grid2( 1, 1, 1);
hipLaunchKernelGGL(( magma_dmcsrgpu_kernel2), dim3(grid2), dim3(BLOCK_SIZE2), 0, queue ,
A->num_rows, B.drow, A->drow );
// access the true number of nonzeros
magma_index_t *cputmp;
stat_cpu += magma_index_malloc_cpu( &cputmp, 1 );
if( stat_cpu != 0 ){
magma_free_cpu( cputmp );
magma_d_mfree( &B, queue );
magma_d_mfree( &B2, queue );
return MAGMA_ERR_HOST_ALLOC;
}
magma_index_getvector( 1, A->row+(A->num_rows), 1, cputmp, 1 );
A->nnz = (magma_int_t) cputmp[0];
// reallocate with right size
stat_dev += magma_dmalloc( &B.dval, A->nnz );
stat_dev += magma_index_malloc( &B.dcol, A->nnz );
if( stat_dev != 0 ){
magma_d_mfree( &B, queue );
magma_d_mfree( &B2, queue );
return MAGMA_ERR_DEVICE_ALLOC;
}
// copy correct values back
hipLaunchKernelGGL(( magma_dmcsrgpu_kernel3), dim3(grid1), dim3(BLOCK_SIZE1), 0, queue ,
A->num_rows, B.dval, B.drow, B.dcol, B2.drow, A->dval, A->drow, A->dcol );
magma_free( A->dcol );
magma_free( A->dval );
A->dcol = B.dcol;
A->dval = B.dval;
magma_free( B2.drow );
magma_free( B.drow );
return MAGMA_SUCCESS;
}
else {
magma_d_sparse_matrix dA, CSRA;
magma_storage_t A_storage = A->storage_type;
magma_location_t A_location = A->memory_location;
magma_d_mconvert( *A, &CSRA, A->storage_type, Magma_CSR, queue );
magma_d_mtransfer( *A, &dA, A->memory_location, Magma_DEV, queue );
magma_dmcsrcompressor_gpu( &dA, queue );
magma_d_mfree( &dA, queue );
magma_d_mfree( A, queue );
magma_d_mtransfer( dA, &CSRA, Magma_DEV, A_location, queue );
magma_d_mconvert( CSRA, A, Magma_CSR, A_storage, queue );
magma_d_mfree( &dA, queue );
magma_d_mfree( &CSRA, queue );
return MAGMA_SUCCESS;
}
}
| 4d7bbb36278db2fe338fdd8a46dacb1ac6411ee6.cu | /*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@generated from magma_zmcsrcompressor_gpu.cu normal z -> d, Fri Jan 30 19:00:29 2015
@author Hartwig Anzt
*/
#include "common_magma.h"
#include "magmasparse.h"
#if (GPUSHMEM < 200)
#define BLOCK_SIZE1 256
#define BLOCK_SIZE2 1
#else
#define BLOCK_SIZE1 256
#define BLOCK_SIZE2 1
#endif
// copy nonzeros into new structure
__global__ void
magma_dmcsrgpu_kernel1( int num_rows,
double *A_val,
magma_index_t *A_rowptr,
magma_index_t *A_colind,
double *B_val,
magma_index_t *B_rowptr,
magma_index_t *B_colind ){
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j;
if(row<num_rows){
double zero = MAGMA_D_ZERO;
int start = A_rowptr[ row ];
int new_location = start;
int end = A_rowptr[ row+1 ];
for( j=start; j<end; j++ ){
if( A_val[j] != zero ){
// B_val[new_location] = A_val[j];
// B_colind[new_location] = A_colind[j];
new_location++;
}
}
        // this is not a correct row pointer! this is the nnz in this row!
B_rowptr[ row ] = new_location-start;
}
}
// generate a valid rowpointer
__global__ void
magma_dmcsrgpu_kernel2( int num_rows,
magma_index_t *B_rowptr,
magma_index_t *A_rowptr ){
int idx = blockIdx.x*blockDim.x+threadIdx.x;
int j, nnz = 0;
if( idx == 0 ){
A_rowptr[ 0 ] = nnz;
for( j=0; j<num_rows; j++ ){
nnz+=B_rowptr[ j ];
A_rowptr[ j+1 ] = nnz;
}
}
}
// copy new structure into original matrix
__global__ void
magma_dmcsrgpu_kernel3( int num_rows,
double *B_val,
magma_index_t *B_rowptr,
magma_index_t *B_colind,
magma_index_t *B2_rowptr,
double *A_val,
magma_index_t *A_rowptr,
magma_index_t *A_colind
){
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j, new_location;
if(row<num_rows){
new_location = A_rowptr[ row ];
int start = B2_rowptr[ row ];
int end = B2_rowptr[ row+1 ];
double zero = MAGMA_D_ZERO;
for( j=start; j<end; j++ ){
if( A_val[j] != zero ){
B_val[new_location] = A_val[j];
B_colind[new_location] = A_colind[j];
new_location++;
}
// A_val[ j ] = B_val[ j ];
// A_colind[ j ] = B_colind[ j ];
}
}
}
/**
Purpose
-------
Removes zeros in a CSR matrix. This is a GPU implementation of the
CSR compressor.
Arguments
---------
@param
A magma_d_sparse_matrix*
input/output matrix
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_daux
********************************************************************/
extern "C" magma_int_t
magma_dmcsrcompressor_gpu(
magma_d_sparse_matrix *A,
magma_queue_t queue )
{
if ( A->memory_location == Magma_DEV && A->storage_type == Magma_CSR ) {
magma_int_t stat_cpu = 0, stat_dev = 0;
magma_d_sparse_matrix B, B2;
B.val = NULL;
B.col = NULL;
B.row = NULL;
B.rowidx = NULL;
B.blockinfo = NULL;
B.diag = NULL;
B.dval = NULL;
B.dcol = NULL;
B.drow = NULL;
B.drowidx = NULL;
B.ddiag = NULL;
B2.val = NULL;
B2.col = NULL;
B2.row = NULL;
B2.rowidx = NULL;
B2.blockinfo = NULL;
B2.diag = NULL;
B2.dval = NULL;
B2.dcol = NULL;
B2.drow = NULL;
B2.drowidx = NULL;
B2.ddiag = NULL;
stat_dev += magma_index_malloc( &B.drow, A->num_rows + 1 );
stat_dev += magma_index_malloc( &B2.drow, A->num_rows + 1 );
if( stat_dev != 0 ){
magma_d_mfree( &B, queue );
magma_d_mfree( &B2, queue );
return MAGMA_ERR_DEVICE_ALLOC;
}
magma_index_copyvector( (A->num_rows+1), A->drow, 1, B2.drow, 1 );
dim3 grid1( (A->num_rows+BLOCK_SIZE1-1)/BLOCK_SIZE1, 1, 1);
// copying the nonzeros into B and write in B.drow how many there are
magma_dmcsrgpu_kernel1<<< grid1, BLOCK_SIZE1, 0, queue >>>
( A->num_rows, A->dval, A->drow, A->dcol, B.dval, B.drow, B.dcol );
// correct the row pointer
dim3 grid2( 1, 1, 1);
magma_dmcsrgpu_kernel2<<< grid2, BLOCK_SIZE2, 0, queue >>>
( A->num_rows, B.drow, A->drow );
// access the true number of nonzeros
magma_index_t *cputmp;
stat_cpu += magma_index_malloc_cpu( &cputmp, 1 );
if( stat_cpu != 0 ){
magma_free_cpu( cputmp );
magma_d_mfree( &B, queue );
magma_d_mfree( &B2, queue );
return MAGMA_ERR_HOST_ALLOC;
}
magma_index_getvector( 1, A->row+(A->num_rows), 1, cputmp, 1 );
A->nnz = (magma_int_t) cputmp[0];
// reallocate with right size
stat_dev += magma_dmalloc( &B.dval, A->nnz );
stat_dev += magma_index_malloc( &B.dcol, A->nnz );
if( stat_dev != 0 ){
magma_d_mfree( &B, queue );
magma_d_mfree( &B2, queue );
return MAGMA_ERR_DEVICE_ALLOC;
}
// copy correct values back
magma_dmcsrgpu_kernel3<<< grid1, BLOCK_SIZE1, 0, queue >>>
( A->num_rows, B.dval, B.drow, B.dcol, B2.drow, A->dval, A->drow, A->dcol );
magma_free( A->dcol );
magma_free( A->dval );
A->dcol = B.dcol;
A->dval = B.dval;
magma_free( B2.drow );
magma_free( B.drow );
return MAGMA_SUCCESS;
}
else {
magma_d_sparse_matrix dA, CSRA;
magma_storage_t A_storage = A->storage_type;
magma_location_t A_location = A->memory_location;
magma_d_mconvert( *A, &CSRA, A->storage_type, Magma_CSR, queue );
magma_d_mtransfer( *A, &dA, A->memory_location, Magma_DEV, queue );
magma_dmcsrcompressor_gpu( &dA, queue );
magma_d_mfree( &dA, queue );
magma_d_mfree( A, queue );
magma_d_mtransfer( dA, &CSRA, Magma_DEV, A_location, queue );
magma_d_mconvert( CSRA, A, Magma_CSR, A_storage, queue );
magma_d_mfree( &dA, queue );
magma_d_mfree( &CSRA, queue );
return MAGMA_SUCCESS;
}
}
|
12148d02170eeb4888ee7dfaa5fbbb37a02c3d6a.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "json_reader_impl.hpp"
#include <hip/hip_runtime.h>
#include <algorithm>
#include <iostream>
#include <map>
#include <memory>
#include <numeric>
#include <string>
#include <vector>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#include <thrust/host_vector.h>
#include <nvstrings/NVStrings.h>
#include <cudf/cudf.h>
#include <utilities/cudf_utils.h>
#include <utilities/error_utils.hpp>
#include <utilities/type_dispatcher.hpp>
#include <io/comp/io_uncomp.h>
#include <rmm/rmm.h>
#include <rmm/thrust_rmm_allocator.h>
#include <io/cuio_common.hpp>
#include <io/utilities/parsing_utils.cuh>
#include <io/utilities/wrapper_utils.hpp>
namespace cudf {
using string_pair = std::pair<const char *, size_t>;
JsonReader::Impl::Impl(json_reader_args const &args) : args_(args) {
// Check if the passed arguments are supported
CUDF_EXPECTS(args_.lines, "Only Json Lines format is currently supported.\n");
d_true_trie_ = createSerializedTrie({"true"});
opts_.trueValuesTrie = d_true_trie_.data().get();
d_false_trie_ = createSerializedTrie({"false"});
opts_.falseValuesTrie = d_false_trie_.data().get();
d_na_trie_ = createSerializedTrie({"null"});
opts_.naValuesTrie = d_na_trie_.data().get();
}
/**---------------------------------------------------------------------------*
* @brief Estimates the maximum expected length or a row, based on the number
* of columns
*
* If the number of columns is not available, it will return a value large
* enough for most use cases
*
* @param[in] num_columns Number of columns in the JSON file (optional)
*
* @return Estimated maximum size of a row, in bytes
*---------------------------------------------------------------------------**/
constexpr size_t calculateMaxRowSize(int num_columns = 0) noexcept {
constexpr size_t max_row_bytes = 16 * 1024; // 16KB
constexpr size_t column_bytes = 64;
constexpr size_t base_padding = 1024; // 1KB
if (num_columns == 0) {
// Use flat size if the number of columns is not known
return max_row_bytes;
} else {
// Expand the size based on the number of columns, if available
return base_padding + num_columns * column_bytes;
}
}
table JsonReader::Impl::read() {
ingestRawInput();
CUDF_EXPECTS(input_data_ != nullptr, "Ingest failed: input data is null.\n");
CUDF_EXPECTS(input_size_ != 0, "Ingest failed: input data has zero size.\n");
decompressInput();
CUDF_EXPECTS(uncomp_data_ != nullptr, "Ingest failed: uncompressed input data is null.\n");
CUDF_EXPECTS(uncomp_size_ != 0, "Ingest failed: uncompressed input data has zero size.\n");
setRecordStarts();
CUDF_EXPECTS(!rec_starts_.empty(), "Error enumerating records.\n");
uploadDataToDevice();
CUDF_EXPECTS(!d_data_.empty(), "Error uploading input data to the GPU.\n");
setColumnNames();
CUDF_EXPECTS(!column_names_.empty(), "Error determining column names.\n");
setDataTypes();
CUDF_EXPECTS(!dtypes_.empty(), "Error in data type detection.\n");
convertDataToColumns();
CUDF_EXPECTS(!columns_.empty(), "Error converting json input into gdf columns.\n");
// Transfer ownership to raw pointer output
std::vector<gdf_column *> out_cols(columns_.size());
for (size_t i = 0; i < columns_.size(); ++i) {
out_cols[i] = columns_[i].release();
}
return table(out_cols.data(), out_cols.size());
}
table JsonReader::Impl::read_byte_range(size_t offset, size_t size){
byte_range_offset_ = offset;
byte_range_size_ = size;
return read();
}
/**---------------------------------------------------------------------------*
* @brief Infer the compression type from the compression parameter and
* the input data.
*
* Returns "none" if the input is not compressed.
 * Throws if the input is not valid.
*
* @param[in] compression_arg Input string that is potentially describing
* the compression type. Can also be "none" or "infer".
* @param[in] source_type Enum describing the type of the data source
* @param[in] source If source_type is FILE_PATH, contains the filepath.
* If source_type is HOST_BUFFER, contains the input JSON data.
*
* @return string representing the compression type.
*---------------------------------------------------------------------------**/
std::string inferCompressionType(const std::string &compression_arg, gdf_input_type source_type,
const std::string &source) {
auto str_tolower = [](const auto &begin, const auto &end) {
std::string out;
std::transform(begin, end, std::back_inserter(out), ::tolower);
return out;
};
const std::string comp_arg_lower = str_tolower(compression_arg.begin(), compression_arg.end());
if (comp_arg_lower != "infer") {
return comp_arg_lower;
}
// Cannot infer compression type from a buffer, assume the input is uncompressed
if (source_type == gdf_csv_input_form::HOST_BUFFER) {
return "none";
}
// Need to infer compression from the file extension
const auto ext_start = std::find(source.rbegin(), source.rend(), '.').base();
const std::string file_ext = str_tolower(ext_start, source.end());
if (file_ext == "json")
return "none";
if (file_ext == "gz")
return "gzip";
if (file_ext == "zip")
return "zip";
if (file_ext == "bz2")
return "bz2";
if (file_ext == "xz")
return "xz";
// None of the supported compression types match
CUDF_FAIL("Invalid compression argument");
}
void JsonReader::Impl::ingestRawInput() {
if (args_.source_type == gdf_csv_input_form::FILE_PATH) {
map_file_ = std::make_unique<MappedFile>(args_.source.c_str(), O_RDONLY);
CUDF_EXPECTS(map_file_->size() > 0, "Input file is empty.\n");
CUDF_EXPECTS(byte_range_offset_ < map_file_->size(), "byte_range offset is too big for the input size.\n");
// Have to align map offset to page size
const auto page_size = sysconf(_SC_PAGESIZE);
size_t map_offset = (byte_range_offset_ / page_size) * page_size;
// Set to rest-of-the-file size, will reduce based on the byte range size
size_t map_size = map_file_->size() - map_offset;
// Include the page padding in the mapped size
const size_t page_padding = byte_range_offset_ - map_offset;
const size_t padded_byte_range_size = byte_range_size_ + page_padding;
if (byte_range_size_ != 0 && padded_byte_range_size < map_size) {
// Need to make sure that w/ padding we don't overshoot the end of file
map_size = min(padded_byte_range_size + calculateMaxRowSize(args_.dtype.size()), map_size);
}
map_file_->map(map_size, map_offset);
input_data_ = static_cast<const char *>(map_file_->data()) + page_padding;
// Ignore page padding for parsing purposes
input_size_ = map_size - page_padding;
} else if (args_.source_type == gdf_csv_input_form::HOST_BUFFER) {
input_data_ = args_.source.c_str() + byte_range_offset_;
input_size_ = args_.source.size() - byte_range_offset_;
} else {
CUDF_FAIL("Invalid input type");
}
}
void JsonReader::Impl::decompressInput() {
const std::string compression_type = inferCompressionType(args_.compression, args_.source_type, args_.source);
if (compression_type == "none") {
// Do not use the owner vector here to avoid copying the whole file to the heap
uncomp_data_ = input_data_;
uncomp_size_ = input_size_;
} else {
CUDF_EXPECTS(getUncompressedHostData(input_data_, input_size_, compression_type, uncomp_data_owner_) == GDF_SUCCESS,
"Input data decompression failed.\n");
uncomp_data_ = uncomp_data_owner_.data();
uncomp_size_ = uncomp_data_owner_.size();
}
}
void JsonReader::Impl::setRecordStarts() {
std::vector<char> chars_to_count{'\n'};
  // Currently, ignoring line terminations within quotes is handled by recording the positions of both,
  // and then filtering out the record starts that are a quotechar or a line termination within a quotechar pair.
if (allow_newlines_in_strings_) {
chars_to_count.push_back('\"');
}
// If not starting at an offset, add an extra row to account for the first row in the file
const auto prefilter_count =
countAllFromSet(uncomp_data_, uncomp_size_, chars_to_count) + ((byte_range_offset_ == 0) ? 1 : 0);
rec_starts_ = device_buffer<uint64_t>(prefilter_count);
auto *find_result_ptr = rec_starts_.data();
// Manually adding an extra row to account for the first row in the file
if (byte_range_offset_ == 0) {
find_result_ptr++;
CUDA_TRY(hipMemsetAsync(rec_starts_.data(), 0ull, sizeof(uint64_t)));
}
std::vector<char> chars_to_find{'\n'};
if (allow_newlines_in_strings_) {
chars_to_find.push_back('\"');
}
// Passing offset = 1 to return positions AFTER the found character
findAllFromSet(uncomp_data_, uncomp_size_, chars_to_find, 1, find_result_ptr);
  // Previous call stores the record positions as encountered by all threads
// Sort the record positions as subsequent processing may require filtering
// certain rows or other processing on specific records
thrust::sort(rmm::exec_policy()->on(0), rec_starts_.data(), rec_starts_.data() + prefilter_count);
auto filtered_count = prefilter_count;
if (allow_newlines_in_strings_) {
std::vector<uint64_t> h_rec_starts(prefilter_count);
CUDA_TRY(
hipMemcpy(h_rec_starts.data(), rec_starts_.data(), sizeof(uint64_t) * prefilter_count, hipMemcpyDefault));
bool quotation = false;
for (gdf_size_type i = 1; i < prefilter_count; ++i) {
if (uncomp_data_[h_rec_starts[i] - 1] == '\"') {
quotation = !quotation;
h_rec_starts[i] = uncomp_size_;
filtered_count--;
} else if (quotation) {
h_rec_starts[i] = uncomp_size_;
filtered_count--;
}
}
    CUDA_TRY(hipMemcpy(rec_starts_.data(), h_rec_starts.data(), sizeof(uint64_t) * prefilter_count, hipMemcpyHostToDevice));
thrust::sort(rmm::exec_policy()->on(0), rec_starts_.data(), rec_starts_.data() + prefilter_count);
}
// Exclude the ending newline as it does not precede a record start
if (uncomp_data_[uncomp_size_ - 1] == '\n') {
filtered_count--;
}
rec_starts_.resize(filtered_count);
}
void JsonReader::Impl::uploadDataToDevice() {
size_t start_offset = 0;
size_t end_offset = uncomp_size_;
// Trim lines that are outside range
if (byte_range_size_ != 0 || byte_range_offset_ != 0) {
std::vector<uint64_t> h_rec_starts(rec_starts_.size());
CUDA_TRY(
hipMemcpy(h_rec_starts.data(), rec_starts_.data(), sizeof(uint64_t) * h_rec_starts.size(), hipMemcpyDefault));
if (byte_range_size_ != 0) {
auto it = h_rec_starts.end() - 1;
while (it >= h_rec_starts.begin() && *it > byte_range_size_) {
end_offset = *it;
--it;
}
h_rec_starts.erase(it + 1, h_rec_starts.end());
}
// Resize to exclude rows outside of the range; adjust row start positions to account for the data subcopy
start_offset = h_rec_starts.front();
rec_starts_.resize(h_rec_starts.size());
thrust::transform(rmm::exec_policy()->on(0), rec_starts_.data(), rec_starts_.data() + rec_starts_.size(),
thrust::make_constant_iterator(start_offset), rec_starts_.data(), thrust::minus<uint64_t>());
}
const size_t bytes_to_upload = end_offset - start_offset;
CUDF_EXPECTS(bytes_to_upload <= uncomp_size_, "Error finding the record within the specified byte range.\n");
// Upload the raw data that is within the rows of interest
d_data_ = device_buffer<char>(bytes_to_upload);
CUDA_TRY(hipMemcpy(d_data_.data(), uncomp_data_ + start_offset, bytes_to_upload, hipMemcpyHostToDevice));
}
/**---------------------------------------------------------------------------*
* @brief Extract value names from a JSON object
*
* @param[in] json_obj Host vector containing the JSON object
* @param[in] opts Parsing options (e.g. delimiter and quotation character)
*
* @return std::vector<std::string> names of JSON object values
*---------------------------------------------------------------------------**/
std::vector<std::string> getNamesFromJsonObject(const std::vector<char> &json_obj, const ParseOptions &opts) {
enum class ParseState { preColName, colName, postColName };
std::vector<std::string> names;
bool quotation = false;
auto state = ParseState::preColName;
int name_start = 0;
for (size_t pos = 0; pos < json_obj.size(); ++pos) {
if (state == ParseState::preColName) {
if (json_obj[pos] == opts.quotechar) {
name_start = pos + 1;
state = ParseState::colName;
continue;
}
} else if (state == ParseState::colName) {
if (json_obj[pos] == opts.quotechar && json_obj[pos - 1] != '\\') {
// if found a non-escaped quote character, it's the end of the column name
names.emplace_back(&json_obj[name_start], &json_obj[pos]);
state = ParseState::postColName;
continue;
}
} else if (state == ParseState::postColName) {
// TODO handle complex data types that might include unquoted commas
if (!quotation && json_obj[pos] == opts.delimiter) {
state = ParseState::preColName;
continue;
} else if (json_obj[pos] == opts.quotechar) {
quotation = !quotation;
}
}
}
return names;
}
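/**---------------------------------------------------------------------------*
 * Illustrative example, using a hypothetical object row (a sketch only):
 *
 *   input row : {"id":1, "desc":"a,b", "ok":true}
 *   result    : {"id", "desc", "ok"}
 *
 * The comma inside the quoted value "a,b" is not treated as a delimiter
 * because the quotation flag is set while scanning the value.
 *---------------------------------------------------------------------------**/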
void JsonReader::Impl::setColumnNames() {
// If file only contains one row, use the file size for the row size
uint64_t first_row_len = d_data_.size() / sizeof(char);
if (rec_starts_.size() > 1) {
// Set first_row_len to the offset of the second row, if it exists
CUDA_TRY(hipMemcpy(&first_row_len, rec_starts_.data() + 1, sizeof(uint64_t), hipMemcpyDefault));
}
std::vector<char> first_row(first_row_len);
CUDA_TRY(hipMemcpy(first_row.data(), d_data_.data(), first_row_len * sizeof(char), hipMemcpyDefault));
// Determine the row format between:
// JSON array - [val1, val2, ...] and
// JSON object - {"col1":val1, "col2":val2, ...}
// based on the top level opening bracket
const auto first_square_bracket = std::find(first_row.begin(), first_row.end(), '[');
const auto first_curly_bracket = std::find(first_row.begin(), first_row.end(), '{');
CUDF_EXPECTS(first_curly_bracket != first_row.end() || first_square_bracket != first_row.end(),
"Input data is not a valid JSON file.");
// If the first opening bracket is '{', assume object format
const bool is_object = first_curly_bracket < first_square_bracket;
if (is_object) {
column_names_ = getNamesFromJsonObject(first_row, opts_);
} else {
int cols_found = 0;
bool quotation = false;
for (size_t pos = 0; pos < first_row.size(); ++pos) {
// Flip the quotation flag if current character is a quotechar
if (first_row[pos] == opts_.quotechar) {
quotation = !quotation;
}
// Check if end of a column/row
else if (pos == first_row.size() - 1 || (!quotation && first_row[pos] == opts_.delimiter)) {
column_names_.emplace_back(std::to_string(cols_found++));
}
}
}
}
void JsonReader::Impl::convertDataToColumns() {
const auto num_columns = dtypes_.size();
for (size_t col = 0; col < num_columns; ++col) {
columns_.emplace_back(rec_starts_.size(), dtypes_[col], gdf_dtype_extra_info{TIME_UNIT_NONE}, column_names_[col]);
CUDF_EXPECTS(columns_.back().allocate() == GDF_SUCCESS, "Cannot allocate columns.\n");
}
thrust::host_vector<gdf_dtype> h_dtypes(num_columns);
thrust::host_vector<void *> h_data(num_columns);
thrust::host_vector<gdf_valid_type *> h_valid(num_columns);
for (size_t i = 0; i < num_columns; ++i) {
h_dtypes[i] = columns_[i]->dtype;
h_data[i] = columns_[i]->data;
h_valid[i] = columns_[i]->valid;
}
rmm::device_vector<gdf_dtype> d_dtypes = h_dtypes;
rmm::device_vector<void *> d_data = h_data;
rmm::device_vector<gdf_valid_type *> d_valid = h_valid;
rmm::device_vector<gdf_size_type> d_valid_counts(num_columns, 0);
convertJsonToColumns(d_dtypes.data().get(), d_data.data().get(), d_valid.data().get(), d_valid_counts.data().get());
CUDA_TRY(hipDeviceSynchronize());
CUDA_TRY(hipGetLastError());
thrust::host_vector<gdf_size_type> h_valid_counts = d_valid_counts;
for (size_t i = 0; i < num_columns; ++i) {
columns_[i]->null_count = columns_[i]->size - h_valid_counts[i];
}
// Handle string columns
for (auto &column : columns_) {
if (column->dtype == GDF_STRING) {
auto str_list = static_cast<string_pair *>(column->data);
auto str_data = NVStrings::create_from_index(str_list, column->size);
RMM_FREE(std::exchange(column->data, str_data), 0);
}
}
}
/**---------------------------------------------------------------------------*
* @brief Functor for converting plain text data to cuDF data type value.
*---------------------------------------------------------------------------**/
struct ConvertFunctor {
/**---------------------------------------------------------------------------*
* @brief Template specialization for operator() for types whose values can be
* convertible to a 0 or 1 to represent false/true. The converting is done by
* checking against the default and user-specified true/false values list.
*
* It is handled here rather than within convertStrToValue() as that function
* is used by other types (ex. timestamp) that aren't 'booleable'.
*---------------------------------------------------------------------------**/
template <typename T, typename std::enable_if_t<std::is_integral<T>::value> * = nullptr>
__host__ __device__ __forceinline__ void operator()(const char *data, void *gdf_columns, long row, long start,
long end, const ParseOptions &opts) {
T &value{static_cast<T *>(gdf_columns)[row]};
// Check for user-specified true/false values first, where the output is
// replaced with 1/0 respectively
const size_t field_len = end - start + 1;
if (serializedTrieContains(opts.trueValuesTrie, data + start, field_len)) {
value = 1;
} else if (serializedTrieContains(opts.falseValuesTrie, data + start, field_len)) {
value = 0;
} else {
value = convertStrToValue<T>(data, start, end, opts);
}
}
/**---------------------------------------------------------------------------*
* @brief Default template operator() dispatch specialization all data types
* (including wrapper types) that is not covered by above.
*---------------------------------------------------------------------------**/
template <typename T, typename std::enable_if_t<!std::is_integral<T>::value> * = nullptr>
__host__ __device__ __forceinline__ void operator()(const char *data, void *gdf_columns, long row, long start,
long end, const ParseOptions &opts) {
T &value{static_cast<T *>(gdf_columns)[row]};
value = convertStrToValue<T>(data, start, end, opts);
}
};
/**---------------------------------------------------------------------------*
* @brief CUDA Kernel that modifies the start and stop offsets to exclude
* the sections outside of the top level brackets.
*
* The top level brackets characters are excluded from the resulting range.
* Parameter stop has the same semantics as end() in STL containers
* (one past the last element)
*
* @param[in] data Pointer to the device buffer containing the data to process
* @param[in,out] start Offset of the first character in the range
* @param[in,out] stop Offset of the first character after the range
*
* @return void
*---------------------------------------------------------------------------**/
__device__ void limitRangeToBrackets(const char *data, long &start, long &stop) {
while (start < stop && data[start] != '[' && data[start] != '{') {
start++;
}
start++;
while (start < stop && data[stop - 1] != ']' && data[stop - 1] != '}') {
stop--;
}
stop--;
}
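/**---------------------------------------------------------------------------*
 * Worked example, assuming a hypothetical record layout: for the record
 * {"a":1} stored at offsets 0..6 and called with start = 0, stop = 7, the
 * function above leaves start = 1 and stop = 6, i.e. the half-open range
 * [1, 6) that covers "a":1 and excludes both top-level brackets.
 *---------------------------------------------------------------------------**/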
/**---------------------------------------------------------------------------*
* @brief CUDA kernel that finds the end position of the next field name,
* including the colon that separates the name from the field value.
*
 * Returns the position after the colon that precedes the value token.
*
* @param[in] data Pointer to the device buffer containing the data to process
* @param[in] opts Parsing options (e.g. delimiter and quotation character)
* @param[in] start Offset of the first character in the range
* @param[in] stop Offset of the first character after the range
*
* @return long Position of the first character after the field name.
*---------------------------------------------------------------------------**/
__device__ long seekFieldNameEnd(const char *data, const ParseOptions opts, long start, long stop) {
bool quotation = false;
for (auto pos = start; pos < stop; ++pos) {
// Ignore escaped quotes
if (data[pos] == opts.quotechar && data[pos - 1] != '\\') {
quotation = !quotation;
} else if (!quotation && data[pos] == ':') {
return pos + 1;
}
}
return stop;
}
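/**---------------------------------------------------------------------------*
 * Illustrative example with a hypothetical field "age": 42, where start is
 * the offset of the opening quote: the quotation flag is toggled at both
 * quotes, the ':' is then seen outside quotation, and the returned offset
 * points at the character right after the colon (the space before 42).
 *---------------------------------------------------------------------------**/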
/**---------------------------------------------------------------------------*
* @brief CUDA kernel that parses and converts plain text data into cuDF column data.
*
* Data is processed one record at a time
*
* @param[in] data The entire data to read
* @param[in] data_size Size of the data buffer, in bytes
* @param[in] rec_starts The start of each data record
* @param[in] num_records The number of lines/rows
* @param[in] dtypes The data type of each column
* @param[in] opts A set of parsing options
* @param[out] gdf_columns The output column data
* @param[in] num_columns The number of columns
* @param[out] valid_fields The bitmaps indicating whether column fields are valid
* @param[out] num_valid_fields The numbers of valid fields in columns
*
* @return void
*---------------------------------------------------------------------------**/
__global__ void convertJsonToGdf(const char *data, size_t data_size, const uint64_t *rec_starts,
gdf_size_type num_records, const gdf_dtype *dtypes, ParseOptions opts,
void *const *gdf_columns, int num_columns, gdf_valid_type *const *valid_fields,
gdf_size_type *num_valid_fields) {
const long rec_id = threadIdx.x + (blockDim.x * blockIdx.x);
if (rec_id >= num_records)
return;
long start = rec_starts[rec_id];
// has the same semantics as end() in STL containers (one past last element)
long stop = ((rec_id < num_records - 1) ? rec_starts[rec_id + 1] : data_size);
limitRangeToBrackets(data, start, stop);
const bool is_object = (data[start - 1] == '{');
for (int col = 0; col < num_columns && start < stop; col++) {
if (is_object) {
start = seekFieldNameEnd(data, opts, start, stop);
}
// field_end is at the next delimiter/newline
const long field_end = seekFieldEnd(data, opts, start, stop);
long field_data_last = field_end - 1;
// Modify start & end to ignore whitespace and quotechars
adjustForWhitespaceAndQuotes(data, &start, &field_data_last, opts.quotechar);
// Empty fields are not legal values
if (start <= field_data_last &&
!serializedTrieContains(opts.naValuesTrie, data + start, field_end - start)) {
// Type dispatcher does not handle GDF_STRINGS
if (dtypes[col] == gdf_dtype::GDF_STRING) {
auto str_list = static_cast<string_pair *>(gdf_columns[col]);
str_list[rec_id].first = data + start;
str_list[rec_id].second = field_data_last - start + 1;
} else {
cudf::type_dispatcher(dtypes[col], ConvertFunctor{}, data, gdf_columns[col], rec_id, start, field_data_last,
opts);
}
// set the valid bitmap - all bits were set to 0 to start
setBitmapBit(valid_fields[col], rec_id);
atomicAdd(&num_valid_fields[col], 1);
} else if (dtypes[col] == gdf_dtype::GDF_STRING) {
auto str_list = static_cast<string_pair *>(gdf_columns[col]);
str_list[rec_id].first = nullptr;
str_list[rec_id].second = 0;
}
start = field_end + 1;
}
}
void JsonReader::Impl::convertJsonToColumns(gdf_dtype *const dtypes, void *const *gdf_columns,
gdf_valid_type *const *valid_fields, gdf_size_type *num_valid_fields) {
int block_size;
int min_grid_size;
CUDA_TRY(hipOccupancyMaxPotentialBlockSize(&min_grid_size, &block_size, convertJsonToGdf));
const int grid_size = (rec_starts_.size() + block_size - 1) / block_size;
hipLaunchKernelGGL(( convertJsonToGdf), dim3(grid_size), dim3(block_size), 0, 0, d_data_.data(), d_data_.size(), rec_starts_.data(), rec_starts_.size(),
dtypes, opts_, gdf_columns, columns_.size(), valid_fields,
num_valid_fields);
CUDA_TRY(hipGetLastError());
}
/**---------------------------------------------------------------------------*
* @brief CUDA kernel that parses and converts data into cuDF column data.
*
 * Data is processed one row/record at a time, so the total number of
 * threads (tid) is equal to the number of rows.
*
* @param[in] data The entire plain text data to read
* @param[in] data_size Size of the data buffer, in bytes
* @param[in] opts A set of parsing options
* @param[in] num_columns The number of columns of input data
* @param[in] rec_starts The start the input data of interest
* @param[in] num_records The number of lines/rows of input data
* @param[out] column_infos The count for each column data type
*
* @returns void
*---------------------------------------------------------------------------**/
__global__ void detectJsonDataTypes(const char *data, size_t data_size, const ParseOptions opts, int num_columns,
const uint64_t *rec_starts, gdf_size_type num_records,
ColumnInfo *column_infos) {
long rec_id = threadIdx.x + (blockDim.x * blockIdx.x);
if (rec_id >= num_records)
return;
long start = rec_starts[rec_id];
// has the same semantics as end() in STL containers (one past last element)
long stop = ((rec_id < num_records - 1) ? rec_starts[rec_id + 1] : data_size);
limitRangeToBrackets(data, start, stop);
const bool is_object = (data[start - 1] == '{');
for (int col = 0; col < num_columns; col++) {
if (is_object) {
start = seekFieldNameEnd(data, opts, start, stop);
}
const long field_end = seekFieldEnd(data, opts, start, stop);
long field_data_last = field_end - 1;
adjustForWhitespaceAndQuotes(data, &start, &field_data_last);
const int field_len = field_data_last - start + 1;
// Checking if the field is empty
if (start > field_data_last ||
serializedTrieContains(opts.naValuesTrie, data + start, field_len)) {
atomicAdd(&column_infos[col].null_count, 1);
start = field_end + 1;
continue;
}
int digit_count = 0;
int decimal_count = 0;
int slash_count = 0;
int dash_count = 0;
int colon_count = 0;
int exponent_count = 0;
int other_count = 0;
const bool maybe_hex = ((field_len > 2 && data[start] == '0' && data[start + 1] == 'x') ||
(field_len > 3 && data[start] == '-' && data[start + 1] == '0' && data[start + 2] == 'x'));
for (long pos = start; pos <= field_data_last; pos++) {
if (isDigit(data[pos], maybe_hex)) {
digit_count++;
continue;
}
// Looking for unique characters that will help identify column types
switch (data[pos]) {
case '.':
decimal_count++;
break;
case '-':
dash_count++;
break;
case '/':
slash_count++;
break;
case ':':
colon_count++;
break;
case 'e':
case 'E':
if (!maybe_hex && pos > start && pos < field_data_last)
exponent_count++;
break;
default:
other_count++;
break;
}
}
// Integers have to have the length of the string
int int_req_number_cnt = field_len;
// Off by one if they start with a minus sign
if (data[start] == '-' && field_len > 1) {
--int_req_number_cnt;
}
// Off by one if they are a hexadecimal number
if (maybe_hex) {
--int_req_number_cnt;
}
if (serializedTrieContains(opts.trueValuesTrie, data + start, field_len) ||
serializedTrieContains(opts.falseValuesTrie, data + start, field_len)) {
atomicAdd(&column_infos[col].bool_count, 1);
} else if (digit_count == int_req_number_cnt) {
atomicAdd(&column_infos[col].int_count, 1);
} else if (isLikeFloat(field_len, digit_count, decimal_count, dash_count, exponent_count)) {
atomicAdd(&column_infos[col].float_count, 1);
}
// A date-time field cannot have more than 3 non-special characters
// A number field cannot have more than one decimal point
else if (other_count > 3 || decimal_count > 1) {
atomicAdd(&column_infos[col].string_count, 1);
} else {
      // A date field can have either one or two '-' or '/'; a legal combination will only have one of them
      // To simplify the process of auto column detection, we are not covering all the date-time format permutations
if ((dash_count > 0 && dash_count <= 2 && slash_count == 0) ||
(dash_count == 0 && slash_count > 0 && slash_count <= 2)) {
if (colon_count <= 2) {
atomicAdd(&column_infos[col].datetime_count, 1);
} else {
atomicAdd(&column_infos[col].string_count, 1);
}
} else {
// Default field type is string
atomicAdd(&column_infos[col].string_count, 1);
}
}
start = field_end + 1;
}
}
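/**---------------------------------------------------------------------------*
 * Illustrative summary of the heuristics above, using hypothetical field
 * values (a sketch, not an exhaustive specification):
 *
 *   "null"         -> null_count        "true" / "false" -> bool_count
 *   "42", "0x1a"   -> int_count         "-1.5e3"         -> float_count
 *   "2018-05-01"   -> datetime_count    anything else    -> string_count
 *
 * The per-column counts are later resolved into a single gdf_dtype in
 * JsonReader::Impl::setDataTypes().
 *---------------------------------------------------------------------------**/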
void JsonReader::Impl::detectDataTypes(ColumnInfo *column_infos) {
int block_size;
int min_grid_size;
CUDA_TRY(hipOccupancyMaxPotentialBlockSize(&min_grid_size, &block_size, detectJsonDataTypes));
// Calculate actual block count to use based on records count
const int grid_size = (rec_starts_.size() + block_size - 1) / block_size;
hipLaunchKernelGGL(( detectJsonDataTypes), dim3(grid_size), dim3(block_size), 0, 0, d_data_.data(), d_data_.size(), opts_, column_names_.size(),
rec_starts_.data(), rec_starts_.size(), column_infos);
CUDA_TRY(hipGetLastError());
}
void JsonReader::Impl::setDataTypes() {
if (!args_.dtype.empty()) {
CUDF_EXPECTS(args_.dtype.size() == column_names_.size(), "Need to specify the type of each column.\n");
// Assume that the dtype is in dictionary format only if all elements contain a colon
const bool is_dict = std::all_of(args_.dtype.begin(), args_.dtype.end(), [](const std::string &s) {
return std::find(s.begin(), s.end(), ':') != s.end();
});
if (is_dict) {
std::map<std::string, gdf_dtype> col_type_map;
for (const auto &ts : args_.dtype) {
const size_t colon_idx = ts.find(":");
const std::string col_name(ts.begin(), ts.begin() + colon_idx);
const std::string type_str(ts.begin() + colon_idx + 1, ts.end());
col_type_map[col_name] = convertStringToDtype(type_str);
}
// Using the map here allows O(n log n) complexity
for (size_t col = 0; col < args_.dtype.size(); ++col) {
dtypes_.push_back(col_type_map[column_names_[col]]);
}
} else {
for (size_t col = 0; col < args_.dtype.size(); ++col) {
dtypes_.push_back(convertStringToDtype(args_.dtype[col]));
}
}
} else {
CUDF_EXPECTS(rec_starts_.size() != 0, "No data available for data type inference.\n");
const auto num_columns = column_names_.size();
rmm::device_vector<ColumnInfo> d_column_infos(num_columns, ColumnInfo{});
detectDataTypes(d_column_infos.data().get());
thrust::host_vector<ColumnInfo> h_column_infos = d_column_infos;
for (const auto &cinfo : h_column_infos) {
if (cinfo.null_count == static_cast<int>(rec_starts_.size())){
// Entire column is NULL; allocate the smallest amount of memory
dtypes_.push_back(GDF_INT8);
} else if (cinfo.string_count > 0) {
dtypes_.push_back(GDF_STRING);
} else if (cinfo.datetime_count > 0) {
dtypes_.push_back(GDF_DATE64);
} else if (cinfo.float_count > 0 ||
(cinfo.int_count > 0 && cinfo.null_count > 0)) {
dtypes_.push_back(GDF_FLOAT64);
} else if (cinfo.int_count > 0) {
dtypes_.push_back(GDF_INT64);
} else if (cinfo.bool_count > 0) {
dtypes_.push_back(GDF_BOOL8);
} else {
CUDF_FAIL("Data type detection failed.\n");
}
}
}
}
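/**---------------------------------------------------------------------------*
 * Note on the two dtype argument formats accepted above (entries shown are
 * illustrative): a plain list such as {"int64", "float64"} assigns types by
 * column order, while a dictionary-style list such as {"id:int64",
 * "score:float64"} maps types by column name; the dictionary form is
 * detected by every entry containing a ':'.
 *---------------------------------------------------------------------------**/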
} // namespace cudf
| 12148d02170eeb4888ee7dfaa5fbbb37a02c3d6a.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "json_reader_impl.hpp"
#include <cuda_runtime.h>
#include <algorithm>
#include <iostream>
#include <map>
#include <memory>
#include <numeric>
#include <string>
#include <vector>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#include <thrust/host_vector.h>
#include <nvstrings/NVStrings.h>
#include <cudf/cudf.h>
#include <utilities/cudf_utils.h>
#include <utilities/error_utils.hpp>
#include <utilities/type_dispatcher.hpp>
#include <io/comp/io_uncomp.h>
#include <rmm/rmm.h>
#include <rmm/thrust_rmm_allocator.h>
#include <io/cuio_common.hpp>
#include <io/utilities/parsing_utils.cuh>
#include <io/utilities/wrapper_utils.hpp>
namespace cudf {
using string_pair = std::pair<const char *, size_t>;
JsonReader::Impl::Impl(json_reader_args const &args) : args_(args) {
// Check if the passed arguments are supported
CUDF_EXPECTS(args_.lines, "Only Json Lines format is currently supported.\n");
d_true_trie_ = createSerializedTrie({"true"});
opts_.trueValuesTrie = d_true_trie_.data().get();
d_false_trie_ = createSerializedTrie({"false"});
opts_.falseValuesTrie = d_false_trie_.data().get();
d_na_trie_ = createSerializedTrie({"null"});
opts_.naValuesTrie = d_na_trie_.data().get();
}
/**---------------------------------------------------------------------------*
* @brief Estimates the maximum expected length or a row, based on the number
* of columns
*
* If the number of columns is not available, it will return a value large
* enough for most use cases
*
* @param[in] num_columns Number of columns in the JSON file (optional)
*
* @return Estimated maximum size of a row, in bytes
*---------------------------------------------------------------------------**/
constexpr size_t calculateMaxRowSize(int num_columns = 0) noexcept {
constexpr size_t max_row_bytes = 16 * 1024; // 16KB
constexpr size_t column_bytes = 64;
constexpr size_t base_padding = 1024; // 1KB
if (num_columns == 0) {
// Use flat size if the number of columns is not known
return max_row_bytes;
} else {
// Expand the size based on the number of columns, if available
return base_padding + num_columns * column_bytes;
}
}
table JsonReader::Impl::read() {
ingestRawInput();
CUDF_EXPECTS(input_data_ != nullptr, "Ingest failed: input data is null.\n");
CUDF_EXPECTS(input_size_ != 0, "Ingest failed: input data has zero size.\n");
decompressInput();
CUDF_EXPECTS(uncomp_data_ != nullptr, "Ingest failed: uncompressed input data is null.\n");
CUDF_EXPECTS(uncomp_size_ != 0, "Ingest failed: uncompressed input data has zero size.\n");
setRecordStarts();
CUDF_EXPECTS(!rec_starts_.empty(), "Error enumerating records.\n");
uploadDataToDevice();
CUDF_EXPECTS(!d_data_.empty(), "Error uploading input data to the GPU.\n");
setColumnNames();
CUDF_EXPECTS(!column_names_.empty(), "Error determining column names.\n");
setDataTypes();
CUDF_EXPECTS(!dtypes_.empty(), "Error in data type detection.\n");
convertDataToColumns();
CUDF_EXPECTS(!columns_.empty(), "Error converting json input into gdf columns.\n");
// Transfer ownership to raw pointer output
std::vector<gdf_column *> out_cols(columns_.size());
for (size_t i = 0; i < columns_.size(); ++i) {
out_cols[i] = columns_[i].release();
}
return table(out_cols.data(), out_cols.size());
}
table JsonReader::Impl::read_byte_range(size_t offset, size_t size){
byte_range_offset_ = offset;
byte_range_size_ = size;
return read();
}
/**---------------------------------------------------------------------------*
* @brief Infer the compression type from the compression parameter and
* the input data.
*
* Returns "none" if the input is not compressed.
 * Throws if the input is not valid.
*
* @param[in] compression_arg Input string that is potentially describing
* the compression type. Can also be "none" or "infer".
* @param[in] source_type Enum describing the type of the data source
* @param[in] source If source_type is FILE_PATH, contains the filepath.
* If source_type is HOST_BUFFER, contains the input JSON data.
*
* @return string representing the compression type.
*---------------------------------------------------------------------------**/
std::string inferCompressionType(const std::string &compression_arg, gdf_input_type source_type,
const std::string &source) {
auto str_tolower = [](const auto &begin, const auto &end) {
std::string out;
std::transform(begin, end, std::back_inserter(out), ::tolower);
return out;
};
const std::string comp_arg_lower = str_tolower(compression_arg.begin(), compression_arg.end());
if (comp_arg_lower != "infer") {
return comp_arg_lower;
}
// Cannot infer compression type from a buffer, assume the input is uncompressed
if (source_type == gdf_csv_input_form::HOST_BUFFER) {
return "none";
}
// Need to infer compression from the file extension
const auto ext_start = std::find(source.rbegin(), source.rend(), '.').base();
const std::string file_ext = str_tolower(ext_start, source.end());
if (file_ext == "json")
return "none";
if (file_ext == "gz")
return "gzip";
if (file_ext == "zip")
return "zip";
if (file_ext == "bz2")
return "bz2";
if (file_ext == "xz")
return "xz";
// None of the supported compression types match
CUDF_FAIL("Invalid compression argument");
}
void JsonReader::Impl::ingestRawInput() {
if (args_.source_type == gdf_csv_input_form::FILE_PATH) {
map_file_ = std::make_unique<MappedFile>(args_.source.c_str(), O_RDONLY);
CUDF_EXPECTS(map_file_->size() > 0, "Input file is empty.\n");
CUDF_EXPECTS(byte_range_offset_ < map_file_->size(), "byte_range offset is too big for the input size.\n");
// Have to align map offset to page size
const auto page_size = sysconf(_SC_PAGESIZE);
size_t map_offset = (byte_range_offset_ / page_size) * page_size;
// Set to rest-of-the-file size, will reduce based on the byte range size
size_t map_size = map_file_->size() - map_offset;
// Include the page padding in the mapped size
const size_t page_padding = byte_range_offset_ - map_offset;
const size_t padded_byte_range_size = byte_range_size_ + page_padding;
if (byte_range_size_ != 0 && padded_byte_range_size < map_size) {
// Need to make sure that w/ padding we don't overshoot the end of file
map_size = min(padded_byte_range_size + calculateMaxRowSize(args_.dtype.size()), map_size);
}
map_file_->map(map_size, map_offset);
input_data_ = static_cast<const char *>(map_file_->data()) + page_padding;
// Ignore page padding for parsing purposes
input_size_ = map_size - page_padding;
} else if (args_.source_type == gdf_csv_input_form::HOST_BUFFER) {
input_data_ = args_.source.c_str() + byte_range_offset_;
input_size_ = args_.source.size() - byte_range_offset_;
} else {
CUDF_FAIL("Invalid input type");
}
}
void JsonReader::Impl::decompressInput() {
const std::string compression_type = inferCompressionType(args_.compression, args_.source_type, args_.source);
if (compression_type == "none") {
// Do not use the owner vector here to avoid copying the whole file to the heap
uncomp_data_ = input_data_;
uncomp_size_ = input_size_;
} else {
CUDF_EXPECTS(getUncompressedHostData(input_data_, input_size_, compression_type, uncomp_data_owner_) == GDF_SUCCESS,
"Input data decompression failed.\n");
uncomp_data_ = uncomp_data_owner_.data();
uncomp_size_ = uncomp_data_owner_.size();
}
}
void JsonReader::Impl::setRecordStarts() {
std::vector<char> chars_to_count{'\n'};
// Currently, ignoring line terminations within quotes is handled by recording the positions of both,
// and then filtering out the positions that are a quotechar or a line termination within a quotechar pair.
if (allow_newlines_in_strings_) {
chars_to_count.push_back('\"');
}
// If not starting at an offset, add an extra row to account for the first row in the file
const auto prefilter_count =
countAllFromSet(uncomp_data_, uncomp_size_, chars_to_count) + ((byte_range_offset_ == 0) ? 1 : 0);
rec_starts_ = device_buffer<uint64_t>(prefilter_count);
auto *find_result_ptr = rec_starts_.data();
// Manually adding an extra row to account for the first row in the file
if (byte_range_offset_ == 0) {
find_result_ptr++;
CUDA_TRY(cudaMemsetAsync(rec_starts_.data(), 0ull, sizeof(uint64_t)));
}
std::vector<char> chars_to_find{'\n'};
if (allow_newlines_in_strings_) {
chars_to_find.push_back('\"');
}
// Passing offset = 1 to return positions AFTER the found character
findAllFromSet(uncomp_data_, uncomp_size_, chars_to_find, 1, find_result_ptr);
// Previous call stores the record positions as encountered by all threads
// Sort the record positions as subsequent processing may require filtering
// certain rows or other processing on specific records
thrust::sort(rmm::exec_policy()->on(0), rec_starts_.data(), rec_starts_.data() + prefilter_count);
auto filtered_count = prefilter_count;
if (allow_newlines_in_strings_) {
std::vector<uint64_t> h_rec_starts(prefilter_count);
CUDA_TRY(
cudaMemcpy(h_rec_starts.data(), rec_starts_.data(), sizeof(uint64_t) * prefilter_count, cudaMemcpyDefault));
bool quotation = false;
for (gdf_size_type i = 1; i < prefilter_count; ++i) {
if (uncomp_data_[h_rec_starts[i] - 1] == '\"') {
quotation = !quotation;
h_rec_starts[i] = uncomp_size_;
filtered_count--;
} else if (quotation) {
h_rec_starts[i] = uncomp_size_;
filtered_count--;
}
}
CUDA_TRY(cudaMemcpy(rec_starts_.data(), h_rec_starts.data(), sizeof(uint64_t) * prefilter_count, cudaMemcpyHostToDevice));
thrust::sort(rmm::exec_policy()->on(0), rec_starts_.data(), rec_starts_.data() + prefilter_count);
}
// Exclude the ending newline as it does not precede a record start
if (uncomp_data_[uncomp_size_ - 1] == '\n') {
filtered_count--;
}
rec_starts_.resize(filtered_count);
}
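// Illustrative example: with allow_newlines_in_strings_ set, an input like '{"a":"x\ny"}\n{"a":2}\n'
// first records the quote positions and the newline inside the quoted value; the loop above then
// rewrites those entries to uncomp_size_ and drops them, so only true row boundaries remain.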
void JsonReader::Impl::uploadDataToDevice() {
size_t start_offset = 0;
size_t end_offset = uncomp_size_;
// Trim lines that are outside range
if (byte_range_size_ != 0 || byte_range_offset_ != 0) {
std::vector<uint64_t> h_rec_starts(rec_starts_.size());
CUDA_TRY(
cudaMemcpy(h_rec_starts.data(), rec_starts_.data(), sizeof(uint64_t) * h_rec_starts.size(), cudaMemcpyDefault));
if (byte_range_size_ != 0) {
auto it = h_rec_starts.end() - 1;
while (it >= h_rec_starts.begin() && *it > byte_range_size_) {
end_offset = *it;
--it;
}
h_rec_starts.erase(it + 1, h_rec_starts.end());
}
// Resize to exclude rows outside of the range; adjust row start positions to account for the data subcopy
start_offset = h_rec_starts.front();
rec_starts_.resize(h_rec_starts.size());
thrust::transform(rmm::exec_policy()->on(0), rec_starts_.data(), rec_starts_.data() + rec_starts_.size(),
thrust::make_constant_iterator(start_offset), rec_starts_.data(), thrust::minus<uint64_t>());
}
const size_t bytes_to_upload = end_offset - start_offset;
CUDF_EXPECTS(bytes_to_upload <= uncomp_size_, "Error finding the record within the specified byte range.\n");
// Upload the raw data that is within the rows of interest
d_data_ = device_buffer<char>(bytes_to_upload);
CUDA_TRY(cudaMemcpy(d_data_.data(), uncomp_data_ + start_offset, bytes_to_upload, cudaMemcpyHostToDevice));
}
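// Note: when a byte range is requested, only the [start_offset, end_offset) slice is copied to the
// device and the record start offsets are rebased to that slice, so the parsing kernels can index
// d_data_ directly without knowing the original file offsets.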
/**---------------------------------------------------------------------------*
* @brief Extract value names from a JSON object
*
* @param[in] json_obj Host vector containing the JSON object
* @param[in] opts Parsing options (e.g. delimiter and quotation character)
*
* @return std::vector<std::string> names of JSON object values
*---------------------------------------------------------------------------**/
std::vector<std::string> getNamesFromJsonObject(const std::vector<char> &json_obj, const ParseOptions &opts) {
enum class ParseState { preColName, colName, postColName };
std::vector<std::string> names;
bool quotation = false;
auto state = ParseState::preColName;
int name_start = 0;
for (size_t pos = 0; pos < json_obj.size(); ++pos) {
if (state == ParseState::preColName) {
if (json_obj[pos] == opts.quotechar) {
name_start = pos + 1;
state = ParseState::colName;
continue;
}
} else if (state == ParseState::colName) {
if (json_obj[pos] == opts.quotechar && json_obj[pos - 1] != '\\') {
// if found a non-escaped quote character, it's the end of the column name
names.emplace_back(&json_obj[name_start], &json_obj[pos]);
state = ParseState::postColName;
continue;
}
} else if (state == ParseState::postColName) {
// TODO handle complex data types that might include unquoted commas
if (!quotation && json_obj[pos] == opts.delimiter) {
state = ParseState::preColName;
continue;
} else if (json_obj[pos] == opts.quotechar) {
quotation = !quotation;
}
}
}
return names;
}
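// Illustrative example: for the object {"price":1.5, "tags":"a,b"} this returns {"price", "tags"};
// the quotation flag keeps the comma inside "a,b" from being treated as a field delimiter while
// scanning for the next column name.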
void JsonReader::Impl::setColumnNames() {
// If the file only contains one row, use the file size for the row size
uint64_t first_row_len = d_data_.size() / sizeof(char);
if (rec_starts_.size() > 1) {
// Set first_row_len to the offset of the second row, if it exists
CUDA_TRY(cudaMemcpy(&first_row_len, rec_starts_.data() + 1, sizeof(uint64_t), cudaMemcpyDefault));
}
std::vector<char> first_row(first_row_len);
CUDA_TRY(cudaMemcpy(first_row.data(), d_data_.data(), first_row_len * sizeof(char), cudaMemcpyDefault));
// Determine the row format between:
// JSON array - [val1, val2, ...] and
// JSON object - {"col1":val1, "col2":val2, ...}
// based on the top level opening bracket
const auto first_square_bracket = std::find(first_row.begin(), first_row.end(), '[');
const auto first_curly_bracket = std::find(first_row.begin(), first_row.end(), '{');
CUDF_EXPECTS(first_curly_bracket != first_row.end() || first_square_bracket != first_row.end(),
"Input data is not a valid JSON file.");
// If the first opening bracket is '{', assume object format
const bool is_object = first_curly_bracket < first_square_bracket;
if (is_object) {
column_names_ = getNamesFromJsonObject(first_row, opts_);
} else {
int cols_found = 0;
bool quotation = false;
for (size_t pos = 0; pos < first_row.size(); ++pos) {
// Flip the quotation flag if current character is a quotechar
if (first_row[pos] == opts_.quotechar) {
quotation = !quotation;
}
// Check if end of a column/row
else if (pos == first_row.size() - 1 || (!quotation && first_row[pos] == opts_.delimiter)) {
column_names_.emplace_back(std::to_string(cols_found++));
}
}
}
}
void JsonReader::Impl::convertDataToColumns() {
const auto num_columns = dtypes_.size();
for (size_t col = 0; col < num_columns; ++col) {
columns_.emplace_back(rec_starts_.size(), dtypes_[col], gdf_dtype_extra_info{TIME_UNIT_NONE}, column_names_[col]);
CUDF_EXPECTS(columns_.back().allocate() == GDF_SUCCESS, "Cannot allocate columns.\n");
}
thrust::host_vector<gdf_dtype> h_dtypes(num_columns);
thrust::host_vector<void *> h_data(num_columns);
thrust::host_vector<gdf_valid_type *> h_valid(num_columns);
for (size_t i = 0; i < num_columns; ++i) {
h_dtypes[i] = columns_[i]->dtype;
h_data[i] = columns_[i]->data;
h_valid[i] = columns_[i]->valid;
}
rmm::device_vector<gdf_dtype> d_dtypes = h_dtypes;
rmm::device_vector<void *> d_data = h_data;
rmm::device_vector<gdf_valid_type *> d_valid = h_valid;
rmm::device_vector<gdf_size_type> d_valid_counts(num_columns, 0);
convertJsonToColumns(d_dtypes.data().get(), d_data.data().get(), d_valid.data().get(), d_valid_counts.data().get());
CUDA_TRY(cudaDeviceSynchronize());
CUDA_TRY(cudaGetLastError());
thrust::host_vector<gdf_size_type> h_valid_counts = d_valid_counts;
for (size_t i = 0; i < num_columns; ++i) {
columns_[i]->null_count = columns_[i]->size - h_valid_counts[i];
}
// Handle string columns
for (auto &column : columns_) {
if (column->dtype == GDF_STRING) {
auto str_list = static_cast<string_pair *>(column->data);
auto str_data = NVStrings::create_from_index(str_list, column->size);
RMM_FREE(std::exchange(column->data, str_data), 0);
}
}
}
/**---------------------------------------------------------------------------*
* @brief Functor for converting plain text data to cuDF data type value.
*---------------------------------------------------------------------------**/
struct ConvertFunctor {
/**---------------------------------------------------------------------------*
* @brief Template specialization for operator() for types whose values can be
* convertible to a 0 or 1 to represent false/true. The conversion is done by
* checking against the default and user-specified true/false values list.
*
* It is handled here rather than within convertStrToValue() as that function
* is used by other types (ex. timestamp) that aren't 'booleable'.
*---------------------------------------------------------------------------**/
template <typename T, typename std::enable_if_t<std::is_integral<T>::value> * = nullptr>
__host__ __device__ __forceinline__ void operator()(const char *data, void *gdf_columns, long row, long start,
long end, const ParseOptions &opts) {
T &value{static_cast<T *>(gdf_columns)[row]};
// Check for user-specified true/false values first, where the output is
// replaced with 1/0 respectively
const size_t field_len = end - start + 1;
if (serializedTrieContains(opts.trueValuesTrie, data + start, field_len)) {
value = 1;
} else if (serializedTrieContains(opts.falseValuesTrie, data + start, field_len)) {
value = 0;
} else {
value = convertStrToValue<T>(data, start, end, opts);
}
}
/**---------------------------------------------------------------------------*
* @brief Default template operator() dispatch specialization for all data types
* (including wrapper types) that are not covered by the above.
*---------------------------------------------------------------------------**/
template <typename T, typename std::enable_if_t<!std::is_integral<T>::value> * = nullptr>
__host__ __device__ __forceinline__ void operator()(const char *data, void *gdf_columns, long row, long start,
long end, const ParseOptions &opts) {
T &value{static_cast<T *>(gdf_columns)[row]};
value = convertStrToValue<T>(data, start, end, opts);
}
};
/**---------------------------------------------------------------------------*
* @brief CUDA device function that modifies the start and stop offsets to exclude
* the sections outside of the top level brackets.
*
* The top level brackets characters are excluded from the resulting range.
* Parameter stop has the same semantics as end() in STL containers
* (one past the last element)
*
* @param[in] data Pointer to the device buffer containing the data to process
* @param[in,out] start Offset of the first character in the range
* @param[in,out] stop Offset of the first character after the range
*
* @return void
*---------------------------------------------------------------------------**/
__device__ void limitRangeToBrackets(const char *data, long &start, long &stop) {
while (start < stop && data[start] != '[' && data[start] != '{') {
start++;
}
start++;
while (start < stop && data[stop - 1] != ']' && data[stop - 1] != '}') {
stop--;
}
stop--;
}
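// Illustrative example: for a record like ' {"a":1} ' the range is narrowed so that start points
// just past '{' and stop points at '}', i.e. the enclosing brackets themselves are excluded.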
/**---------------------------------------------------------------------------*
* @brief CUDA device function that finds the end position of the next field name,
* including the colon that separates the name from the field value.
*
* Returns the position after the colon that precedes the value token.
*
* @param[in] data Pointer to the device buffer containing the data to process
* @param[in] opts Parsing options (e.g. delimiter and quotation character)
* @param[in] start Offset of the first character in the range
* @param[in] stop Offset of the first character after the range
*
* @return long Position of the first character after the field name.
*---------------------------------------------------------------------------**/
__device__ long seekFieldNameEnd(const char *data, const ParseOptions opts, long start, long stop) {
bool quotation = false;
for (auto pos = start; pos < stop; ++pos) {
// Ignore escaped quotes
if (data[pos] == opts.quotechar && data[pos - 1] != '\\') {
quotation = !quotation;
} else if (!quotation && data[pos] == ':') {
return pos + 1;
}
}
return stop;
}
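// Illustrative example: given '"count": 42', with start at the opening quote, the function returns
// the offset right after the colon so the caller resumes parsing at the value token; a ':' inside a
// quoted name is ignored thanks to the quotation flag.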
/**---------------------------------------------------------------------------*
* @brief CUDA kernel that parses and converts plain text data into cuDF column data.
*
* Data is processed one record at a time
*
* @param[in] data The entire data to read
* @param[in] data_size Size of the data buffer, in bytes
* @param[in] rec_starts The start of each data record
* @param[in] num_records The number of lines/rows
* @param[in] dtypes The data type of each column
* @param[in] opts A set of parsing options
* @param[out] gdf_columns The output column data
* @param[in] num_columns The number of columns
* @param[out] valid_fields The bitmaps indicating whether column fields are valid
* @param[out] num_valid_fields The number of valid fields in each column
*
* @return void
*---------------------------------------------------------------------------**/
__global__ void convertJsonToGdf(const char *data, size_t data_size, const uint64_t *rec_starts,
gdf_size_type num_records, const gdf_dtype *dtypes, ParseOptions opts,
void *const *gdf_columns, int num_columns, gdf_valid_type *const *valid_fields,
gdf_size_type *num_valid_fields) {
const long rec_id = threadIdx.x + (blockDim.x * blockIdx.x);
if (rec_id >= num_records)
return;
long start = rec_starts[rec_id];
// has the same semantics as end() in STL containers (one past last element)
long stop = ((rec_id < num_records - 1) ? rec_starts[rec_id + 1] : data_size);
limitRangeToBrackets(data, start, stop);
const bool is_object = (data[start - 1] == '{');
for (int col = 0; col < num_columns && start < stop; col++) {
if (is_object) {
start = seekFieldNameEnd(data, opts, start, stop);
}
// field_end is at the next delimiter/newline
const long field_end = seekFieldEnd(data, opts, start, stop);
long field_data_last = field_end - 1;
// Modify start & end to ignore whitespace and quotechars
adjustForWhitespaceAndQuotes(data, &start, &field_data_last, opts.quotechar);
// Empty fields are not legal values
if (start <= field_data_last &&
!serializedTrieContains(opts.naValuesTrie, data + start, field_end - start)) {
// Type dispatcher does not handle GDF_STRINGS
if (dtypes[col] == gdf_dtype::GDF_STRING) {
auto str_list = static_cast<string_pair *>(gdf_columns[col]);
str_list[rec_id].first = data + start;
str_list[rec_id].second = field_data_last - start + 1;
} else {
cudf::type_dispatcher(dtypes[col], ConvertFunctor{}, data, gdf_columns[col], rec_id, start, field_data_last,
opts);
}
// set the valid bitmap - all bits were set to 0 to start
setBitmapBit(valid_fields[col], rec_id);
atomicAdd(&num_valid_fields[col], 1);
} else if (dtypes[col] == gdf_dtype::GDF_STRING) {
auto str_list = static_cast<string_pair *>(gdf_columns[col]);
str_list[rec_id].first = nullptr;
str_list[rec_id].second = 0;
}
start = field_end + 1;
}
}
void JsonReader::Impl::convertJsonToColumns(gdf_dtype *const dtypes, void *const *gdf_columns,
gdf_valid_type *const *valid_fields, gdf_size_type *num_valid_fields) {
int block_size;
int min_grid_size;
CUDA_TRY(cudaOccupancyMaxPotentialBlockSize(&min_grid_size, &block_size, convertJsonToGdf));
const int grid_size = (rec_starts_.size() + block_size - 1) / block_size;
convertJsonToGdf<<<grid_size, block_size>>>(d_data_.data(), d_data_.size(), rec_starts_.data(), rec_starts_.size(),
dtypes, opts_, gdf_columns, columns_.size(), valid_fields,
num_valid_fields);
CUDA_TRY(cudaGetLastError());
}
/**---------------------------------------------------------------------------*
* @brief CUDA kernel that parses and converts data into cuDF column data.
*
* Data is processed one row/record at a time, so the total number of
* threads (tid) is equal to the number of rows.
*
* @param[in] data The entire plain text data to read
* @param[in] data_size Size of the data buffer, in bytes
* @param[in] opts A set of parsing options
* @param[in] num_columns The number of columns of input data
* @param[in] rec_starts The start of each record in the input data of interest
* @param[in] num_records The number of lines/rows of input data
* @param[out] column_infos The count for each column data type
*
* @returns void
*---------------------------------------------------------------------------**/
__global__ void detectJsonDataTypes(const char *data, size_t data_size, const ParseOptions opts, int num_columns,
const uint64_t *rec_starts, gdf_size_type num_records,
ColumnInfo *column_infos) {
long rec_id = threadIdx.x + (blockDim.x * blockIdx.x);
if (rec_id >= num_records)
return;
long start = rec_starts[rec_id];
// has the same semantics as end() in STL containers (one past last element)
long stop = ((rec_id < num_records - 1) ? rec_starts[rec_id + 1] : data_size);
limitRangeToBrackets(data, start, stop);
const bool is_object = (data[start - 1] == '{');
for (int col = 0; col < num_columns; col++) {
if (is_object) {
start = seekFieldNameEnd(data, opts, start, stop);
}
const long field_end = seekFieldEnd(data, opts, start, stop);
long field_data_last = field_end - 1;
adjustForWhitespaceAndQuotes(data, &start, &field_data_last);
const int field_len = field_data_last - start + 1;
// Checking if the field is empty
if (start > field_data_last ||
serializedTrieContains(opts.naValuesTrie, data + start, field_len)) {
atomicAdd(&column_infos[col].null_count, 1);
start = field_end + 1;
continue;
}
int digit_count = 0;
int decimal_count = 0;
int slash_count = 0;
int dash_count = 0;
int colon_count = 0;
int exponent_count = 0;
int other_count = 0;
const bool maybe_hex = ((field_len > 2 && data[start] == '0' && data[start + 1] == 'x') ||
(field_len > 3 && data[start] == '-' && data[start + 1] == '0' && data[start + 2] == 'x'));
for (long pos = start; pos <= field_data_last; pos++) {
if (isDigit(data[pos], maybe_hex)) {
digit_count++;
continue;
}
// Looking for unique characters that will help identify column types
switch (data[pos]) {
case '.':
decimal_count++;
break;
case '-':
dash_count++;
break;
case '/':
slash_count++;
break;
case ':':
colon_count++;
break;
case 'e':
case 'E':
if (!maybe_hex && pos > start && pos < field_data_last)
exponent_count++;
break;
default:
other_count++;
break;
}
}
// For an integer, the number of digits has to match the field length
int int_req_number_cnt = field_len;
// Off by one if they start with a minus sign
if (data[start] == '-' && field_len > 1) {
--int_req_number_cnt;
}
// Off by one if they are a hexadecimal number
if (maybe_hex) {
--int_req_number_cnt;
}
if (serializedTrieContains(opts.trueValuesTrie, data + start, field_len) ||
serializedTrieContains(opts.falseValuesTrie, data + start, field_len)) {
atomicAdd(&column_infos[col].bool_count, 1);
} else if (digit_count == int_req_number_cnt) {
atomicAdd(&column_infos[col].int_count, 1);
} else if (isLikeFloat(field_len, digit_count, decimal_count, dash_count, exponent_count)) {
atomicAdd(&column_infos[col].float_count, 1);
}
// A date-time field cannot have more than 3 non-special characters
// A number field cannot have more than one decimal point
else if (other_count > 3 || decimal_count > 1) {
atomicAdd(&column_infos[col].string_count, 1);
} else {
// A date field can have either one or two '-' or '/'; a legal combination will only have one of them
// To simplify the process of auto column detection, we are not covering all the date-time format permutations
if ((dash_count > 0 && dash_count <= 2 && slash_count == 0) ||
(dash_count == 0 && slash_count > 0 && slash_count <= 2)) {
if (colon_count <= 2) {
atomicAdd(&column_infos[col].datetime_count, 1);
} else {
atomicAdd(&column_infos[col].string_count, 1);
}
} else {
// Default field type is string
atomicAdd(&column_infos[col].string_count, 1);
}
}
start = field_end + 1;
}
}
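// Illustrative examples of the heuristics above (assuming the default true/false value lists):
// "123" and "-0x1A" are counted as int, "1.5e3" as float, "2018-11-02" and "11/02/2018 14:30:00"
// as datetime, "true" as bool, and fields with more than one decimal point or more than three
// other characters fall back to string.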
void JsonReader::Impl::detectDataTypes(ColumnInfo *column_infos) {
int block_size;
int min_grid_size;
CUDA_TRY(cudaOccupancyMaxPotentialBlockSize(&min_grid_size, &block_size, detectJsonDataTypes));
// Calculate actual block count to use based on records count
const int grid_size = (rec_starts_.size() + block_size - 1) / block_size;
detectJsonDataTypes<<<grid_size, block_size>>>(d_data_.data(), d_data_.size(), opts_, column_names_.size(),
rec_starts_.data(), rec_starts_.size(), column_infos);
CUDA_TRY(cudaGetLastError());
}
void JsonReader::Impl::setDataTypes() {
if (!args_.dtype.empty()) {
CUDF_EXPECTS(args_.dtype.size() == column_names_.size(), "Need to specify the type of each column.\n");
// Assume that the dtype is in dictionary format only if all elements contain a colon
const bool is_dict = std::all_of(args_.dtype.begin(), args_.dtype.end(), [](const std::string &s) {
return std::find(s.begin(), s.end(), ':') != s.end();
});
if (is_dict) {
std::map<std::string, gdf_dtype> col_type_map;
for (const auto &ts : args_.dtype) {
const size_t colon_idx = ts.find(":");
const std::string col_name(ts.begin(), ts.begin() + colon_idx);
const std::string type_str(ts.begin() + colon_idx + 1, ts.end());
col_type_map[col_name] = convertStringToDtype(type_str);
}
// Using the map here allows O(n log n) complexity
for (size_t col = 0; col < args_.dtype.size(); ++col) {
dtypes_.push_back(col_type_map[column_names_[col]]);
}
} else {
for (size_t col = 0; col < args_.dtype.size(); ++col) {
dtypes_.push_back(convertStringToDtype(args_.dtype[col]));
}
}
} else {
CUDF_EXPECTS(rec_starts_.size() != 0, "No data available for data type inference.\n");
const auto num_columns = column_names_.size();
rmm::device_vector<ColumnInfo> d_column_infos(num_columns, ColumnInfo{});
detectDataTypes(d_column_infos.data().get());
thrust::host_vector<ColumnInfo> h_column_infos = d_column_infos;
for (const auto &cinfo : h_column_infos) {
if (cinfo.null_count == static_cast<int>(rec_starts_.size())){
// Entire column is NULL; allocate the smallest amount of memory
dtypes_.push_back(GDF_INT8);
} else if (cinfo.string_count > 0) {
dtypes_.push_back(GDF_STRING);
} else if (cinfo.datetime_count > 0) {
dtypes_.push_back(GDF_DATE64);
} else if (cinfo.float_count > 0 ||
(cinfo.int_count > 0 && cinfo.null_count > 0)) {
dtypes_.push_back(GDF_FLOAT64);
} else if (cinfo.int_count > 0) {
dtypes_.push_back(GDF_INT64);
} else if (cinfo.bool_count > 0) {
dtypes_.push_back(GDF_BOOL8);
} else {
CUDF_FAIL("Data type detection failed.\n");
}
}
}
}
} // namespace cudf
|
03e2e5ed79bf7398794fdcde7d907f99558b7148.hip | // !!! This is a file automatically generated by hipify!!!
#include <errno.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h>
#include <unistd.h>
#include "case.cuh"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "init_hip.cuh"
#include "type.cuh"
// extern int errno;
void Statistics(IPTR, Population *p);
void Report(int gen, IPTR pop, Population *p);
void Initialize(int argc, char *argv[], Population *p, Functions *f);
void WritePid(char *pidFile);
void RmPidFile(char *pidFile);
void PhenoPrint(FILE *fp, IPTR pop, Population *p); // modified
int main(int argc, char *argv[]) {
IPTR tmp; // used for swapping two IPTRs
int foo; /* just a placeholder for a value that is not used */
Population pop, *p; // The current population under inspection
Functions funcs,
*f; // A set of function pointers which are swapped out depending on the parameters.
p = &pop;
f = &funcs;
p->generation = 0;
printf("Blah\n");
fflush(stdout);
Initialize(argc, argv, p, f);
// WritePid(p->pidFile);
printf("Blah\n");
fflush(stdout);
while (p->generation < p->maxgen) {
p->generation++;
foo = f->CurrentGA(p->oldpop, p->newpop, p->generation, p, f);
if (p->injectFraction > 0.0) {
if ((p->generation % p->injectPeriod == 0) && (p->generation <= p->injectStop)) {
LoadCases(p->newpop, p->generation, p->injectFraction, p, f);
/* printf("Loaded cases %d\n", (int) (loadPerc/100.0 * popsize));*/
}
}
Statistics(p->newpop, p);
Report(p->generation, p->newpop, p);
// Record data (best individual at each gen)
FILE *dataFile;
dataFile = fopen("myData.txt", "a");
fprintf(dataFile, "%f\n", p->newpop[p->maxi].objfunc);
fclose(dataFile);
// Record best route
FILE *routeFile;
routeFile = fopen("myRoutes.txt", "a");
// PhenoPrint<<<1,1>>>(routeFile, p->newpop, p);
PhenoPrint(routeFile, p->newpop, p);
// Wait for GPU to finish before accessing on host
// hipDeviceSynchronize();
fprintf(routeFile, "\n");
for (int i = 0; i < p->newpop->chromLen; i++)
fprintf(routeFile, "%d, ", p->newpop->chrom[i]);
fprintf(routeFile, "\n");
fclose(routeFile);
tmp = p->oldpop;
p->oldpop = p->newpop;
p->newpop = tmp;
}
if (p->nCurrentCases > 0) {
p->nCases = FindNCases(p->nCFile);
StoreNcases(p->nCFile, p->nCases, p->nCurrentCases);
}
// RmPidFile(p->pidFile);
return 0;
}
void WritePid(char *fname) {
struct stat buf;
int er;
FILE *fp;
er = stat(fname, &buf);
if (!(er == -1 || errno == ENOENT)) {
fprintf(stderr, "Lock file (%s) exists, Process running\n", fname);
fprintf(stderr, "This process is exiting....\n");
exit(1);
}
if ((fp = fopen(fname, "w")) == NULL) {
fprintf(stderr, "Error in opening file %s for writing\n", fname);
exit(2);
}
// fprintf(fp, "%lu", getpid());
}
void RmPidFile(char *fname) {
unlink(fname);
}
| 03e2e5ed79bf7398794fdcde7d907f99558b7148.cu | #include <errno.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h>
#include <unistd.h>
#include "case.cuh"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "init.cuh"
#include "type.cuh"
// extern int errno;
void Statistics(IPTR, Population *p);
void Report(int gen, IPTR pop, Population *p);
void Initialize(int argc, char *argv[], Population *p, Functions *f);
void WritePid(char *pidFile);
void RmPidFile(char *pidFile);
void PhenoPrint(FILE *fp, IPTR pop, Population *p); // modified
int main(int argc, char *argv[]) {
IPTR tmp; // used for swapping two IPTRs
int foo; /* just a placeholder for a value that is not used */
Population pop, *p; // The current population under inspection
Functions funcs,
*f; // A set of function pointers which are swapped out depending on the parameters.
p = &pop;
f = &funcs;
p->generation = 0;
printf("Blah\n");
fflush(stdout);
Initialize(argc, argv, p, f);
// WritePid(p->pidFile);
printf("Blah\n");
fflush(stdout);
while (p->generation < p->maxgen) {
p->generation++;
foo = f->CurrentGA(p->oldpop, p->newpop, p->generation, p, f);
if (p->injectFraction > 0.0) {
if ((p->generation % p->injectPeriod == 0) && (p->generation <= p->injectStop)) {
LoadCases(p->newpop, p->generation, p->injectFraction, p, f);
/* printf("Loaded cases %d\n", (int) (loadPerc/100.0 * popsize));*/
}
}
Statistics(p->newpop, p);
Report(p->generation, p->newpop, p);
// Record data (best individual at each gen)
FILE *dataFile;
dataFile = fopen("myData.txt", "a");
fprintf(dataFile, "%f\n", p->newpop[p->maxi].objfunc);
fclose(dataFile);
// Record best route
FILE *routeFile;
routeFile = fopen("myRoutes.txt", "a");
// PhenoPrint<<<1,1>>>(routeFile, p->newpop, p);
PhenoPrint(routeFile, p->newpop, p);
// Wait for GPU to finish before accessing on host
// cudaDeviceSynchronize();
fprintf(routeFile, "\n");
for (int i = 0; i < p->newpop->chromLen; i++)
fprintf(routeFile, "%d, ", p->newpop->chrom[i]);
fprintf(routeFile, "\n");
fclose(routeFile);
tmp = p->oldpop;
p->oldpop = p->newpop;
p->newpop = tmp;
}
if (p->nCurrentCases > 0) {
p->nCases = FindNCases(p->nCFile);
StoreNcases(p->nCFile, p->nCases, p->nCurrentCases);
}
// RmPidFile(p->pidFile);
return 0;
}
void WritePid(char *fname) {
struct stat buf;
int er;
FILE *fp;
er = stat(fname, &buf);
if (!(er == -1 || errno == ENOENT)) {
fprintf(stderr, "Lock file (%s) exists, Process running\n", fname);
fprintf(stderr, "This process is exiting....\n");
exit(1);
}
if ((fp = fopen(fname, "w")) == NULL) {
fprintf(stderr, "Error in opening file %s for writing\n", fname);
exit(2);
}
// fprintf(fp, "%lu", getpid());
}
void RmPidFile(char *fname) {
unlink(fname);
}
|
320918be977ab3dfe5f3a017ef66f6fb4233ecff.hip | // !!! This is a file automatically generated by hipify!!!
/**
* (C) Copyright 2020, 2021 IBM. All Rights Reserved.
*
* This code is licensed under the Apache License, Version 2.0. You may
* obtain a copy of this license in the LICENSE.txt file in the root directory
* of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
*
* Any modifications or derivative works of this code must retain this
* copyright notice, and modified files need to carry a notice indicating
* that they have been altered from the originals.
*/
#include "rpu_pulsed_meta_parameter.h"
#include "rpucuda_difference_device.h"
#include <memory>
namespace RPU {
/******************************************************************************************/
/* DifferenceRPUDeviceCuda
CUDA implementation of DifferenceRPUDevice
*/
template <typename T>
DifferenceRPUDeviceCuda<T>::DifferenceRPUDeviceCuda(
CudaContext *c, const DifferenceRPUDevice<T> &rpu_device) {
this->context_ = c;
populateFrom(rpu_device); // use populate to call parent
};
// copy constructor
template <typename T>
DifferenceRPUDeviceCuda<T>::DifferenceRPUDeviceCuda(const DifferenceRPUDeviceCuda<T> &other)
: VectorRPUDeviceCuda<T>(other) {
g_plus_ = other.g_plus_;
g_minus_ = other.g_minus_;
dev_reduce_weightening_inverted_ = nullptr;
if (other.dev_reduce_weightening_inverted_ != nullptr) {
dev_reduce_weightening_inverted_ = RPU::make_unique<CudaArray<T>>(this->context_, 2);
dev_reduce_weightening_inverted_->assign(*other.dev_reduce_weightening_inverted_);
dev_reduce_weightening_inverted_->synchronize();
}
};
// copy assignment
template <typename T>
DifferenceRPUDeviceCuda<T> &
DifferenceRPUDeviceCuda<T>::operator=(const DifferenceRPUDeviceCuda<T> &other) {
DifferenceRPUDeviceCuda<T> tmp(other);
swap(*this, tmp);
return *this;
};
// move constructor
template <typename T>
DifferenceRPUDeviceCuda<T>::DifferenceRPUDeviceCuda(DifferenceRPUDeviceCuda<T> &&other) {
*this = std::move(other);
};
// move assignment
template <typename T>
DifferenceRPUDeviceCuda<T> &
DifferenceRPUDeviceCuda<T>::operator=(DifferenceRPUDeviceCuda<T> &&other) {
VectorRPUDeviceCuda<T>::operator=(std::move(other));
g_plus_ = other.g_plus_;
g_minus_ = other.g_minus_;
dev_reduce_weightening_inverted_ = std::move(other.dev_reduce_weightening_inverted_);
return *this;
};
template <typename T>
void DifferenceRPUDeviceCuda<T>::populateFrom(const AbstractRPUDevice<T> &rpu_device_in) {
const auto *rpu_device = dynamic_cast<const DifferenceRPUDevice<T> *>(&rpu_device_in);
if (rpu_device == nullptr) {
RPU_FATAL("Expect DifferenceRPUDevice.");
}
VectorRPUDeviceCuda<T>::populateFrom(rpu_device_in);
rpu_device->getGIndices(g_plus_, g_minus_);
// inverted
std::vector<T> rw_inv(2);
const T *rw = rpu_device->getReduceWeightening();
rw_inv[0] = rw[1];
rw_inv[1] = rw[0];
this->dev_reduce_weightening_->assign(rw);
dev_reduce_weightening_inverted_ = RPU::make_unique<CudaArray<T>>(this->context_, 2, &rw_inv[0]);
this->context_->synchronize();
}
template <typename T>
void DifferenceRPUDeviceCuda<T>::resetCols(
T *dev_weights, int start_col, int n_cols, T reset_prob) {
VectorRPUDeviceCuda<T>::resetCols(dev_weights, start_col, n_cols, reset_prob);
}
template <typename T> bool DifferenceRPUDeviceCuda<T>::isInverted() const { return g_plus_ == 0; }
template <typename T> void DifferenceRPUDeviceCuda<T>::invert() {
std::swap(g_plus_, g_minus_);
std::swap(this->dev_reduce_weightening_, this->dev_reduce_weightening_inverted_);
}
template <typename T>
pwukpvec_t<T> DifferenceRPUDeviceCuda<T>::getUpdateKernels(
int m_batch, int nK32, int use_bo64, bool out_trans, const PulsedUpdateMetaParameter<T> &up) {
if (this->rpucuda_device_vec_.size() != 2) {
RPU_FATAL("Expect exactly two devices.");
}
if (getPar().singleDeviceUpdate()) {
RPU_FATAL("Single device update not supported for Difference Device");
}
return VectorRPUDeviceCuda<T>::getUpdateKernels(m_batch, nK32, use_bo64, out_trans, up);
}
template <typename T>
void DifferenceRPUDeviceCuda<T>::runUpdateKernel(
pwukp_t<T> kpars,
CudaContext *up_context,
T *dev_weights,
int m_batch,
const BitLineMaker<T> *blm,
const PulsedUpdateMetaParameter<T> &up,
hiprandState_t *dev_states,
int one_sided,
uint32_t *x_counts_chunk,
uint32_t *d_counts_chunk) {
// Calling kpars->run(.., this, ..) directly would cause an error
// because the difference device is derived from the abstract device.
DEBUG_OUT("start run update kernel.");
DEBUG_CALL(kpars->print(););
if (one_sided != 0) {
RPU_FATAL("Cannot use one_sided here.");
}
bool same_context = getPar().same_context;
if (!same_context) {
up_context->recordEvent();
}
CudaContext *cp = up_context;
CudaContext *cm = up_context;
if (!same_context) {
cp = &*this->context_vec_[g_plus_];
cm = &*this->context_vec_[g_minus_];
}
int n = kpars->getNStates() / 2; // each device uses different random states
if (!same_context) {
cp->waitEvent(up_context->getEvent());
cm->waitEvent(up_context->getEvent());
}
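// The two one-sided updates below realize a signed update: the device at g_plus_ consumes only
// the positive pulses (one_sided = +1) and the device at g_minus_ only the negative ones
// (one_sided = -1); the visible weight is their weighted difference via reduceToWeights().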
kpars->run(
cp->getStream(), this->dev_weights_ptrs_[g_plus_], m_batch, blm,
static_cast<PulsedRPUDeviceCuda<T> *>(&*this->rpucuda_device_vec_[g_plus_]), // checked above
up, dev_states,
1, // one sided!!
x_counts_chunk, d_counts_chunk);
kpars->run(
cm->getStream(), this->dev_weights_ptrs_[g_minus_], m_batch, blm,
static_cast<PulsedRPUDeviceCuda<T> *>(&*this->rpucuda_device_vec_[g_minus_]), // checked above
up, dev_states,
-1, // one sided!!
x_counts_chunk, d_counts_chunk);
if (!same_context) {
up_context->recordWaitEvent(cp);
up_context->recordWaitEvent(cm);
}
this->reduceToWeights(up_context, dev_weights);
}
template class DifferenceRPUDeviceCuda<float>;
#ifdef RPU_USE_DOUBLE
template class DifferenceRPUDeviceCuda<double>;
#endif
} // namespace RPU
| 320918be977ab3dfe5f3a017ef66f6fb4233ecff.cu | /**
* (C) Copyright 2020, 2021 IBM. All Rights Reserved.
*
* This code is licensed under the Apache License, Version 2.0. You may
* obtain a copy of this license in the LICENSE.txt file in the root directory
* of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
*
* Any modifications or derivative works of this code must retain this
* copyright notice, and modified files need to carry a notice indicating
* that they have been altered from the originals.
*/
#include "rpu_pulsed_meta_parameter.h"
#include "rpucuda_difference_device.h"
#include <memory>
namespace RPU {
/******************************************************************************************/
/* DifferenceRPUDeviceCuda
CUDA implementation of DifferenceRPUDevice
*/
template <typename T>
DifferenceRPUDeviceCuda<T>::DifferenceRPUDeviceCuda(
CudaContext *c, const DifferenceRPUDevice<T> &rpu_device) {
this->context_ = c;
populateFrom(rpu_device); // use populate to call parent
};
// copy constructor
template <typename T>
DifferenceRPUDeviceCuda<T>::DifferenceRPUDeviceCuda(const DifferenceRPUDeviceCuda<T> &other)
: VectorRPUDeviceCuda<T>(other) {
g_plus_ = other.g_plus_;
g_minus_ = other.g_minus_;
dev_reduce_weightening_inverted_ = nullptr;
if (other.dev_reduce_weightening_inverted_ != nullptr) {
dev_reduce_weightening_inverted_ = RPU::make_unique<CudaArray<T>>(this->context_, 2);
dev_reduce_weightening_inverted_->assign(*other.dev_reduce_weightening_inverted_);
dev_reduce_weightening_inverted_->synchronize();
}
};
// copy assignment
template <typename T>
DifferenceRPUDeviceCuda<T> &
DifferenceRPUDeviceCuda<T>::operator=(const DifferenceRPUDeviceCuda<T> &other) {
DifferenceRPUDeviceCuda<T> tmp(other);
swap(*this, tmp);
return *this;
};
// move constructor
template <typename T>
DifferenceRPUDeviceCuda<T>::DifferenceRPUDeviceCuda(DifferenceRPUDeviceCuda<T> &&other) {
*this = std::move(other);
};
// move assignment
template <typename T>
DifferenceRPUDeviceCuda<T> &
DifferenceRPUDeviceCuda<T>::operator=(DifferenceRPUDeviceCuda<T> &&other) {
VectorRPUDeviceCuda<T>::operator=(std::move(other));
g_plus_ = other.g_plus_;
g_minus_ = other.g_minus_;
dev_reduce_weightening_inverted_ = std::move(other.dev_reduce_weightening_inverted_);
return *this;
};
template <typename T>
void DifferenceRPUDeviceCuda<T>::populateFrom(const AbstractRPUDevice<T> &rpu_device_in) {
const auto *rpu_device = dynamic_cast<const DifferenceRPUDevice<T> *>(&rpu_device_in);
if (rpu_device == nullptr) {
RPU_FATAL("Expect DifferenceRPUDevice.");
}
VectorRPUDeviceCuda<T>::populateFrom(rpu_device_in);
rpu_device->getGIndices(g_plus_, g_minus_);
// inverted
std::vector<T> rw_inv(2);
const T *rw = rpu_device->getReduceWeightening();
rw_inv[0] = rw[1];
rw_inv[1] = rw[0];
this->dev_reduce_weightening_->assign(rw);
dev_reduce_weightening_inverted_ = RPU::make_unique<CudaArray<T>>(this->context_, 2, &rw_inv[0]);
this->context_->synchronize();
}
template <typename T>
void DifferenceRPUDeviceCuda<T>::resetCols(
T *dev_weights, int start_col, int n_cols, T reset_prob) {
VectorRPUDeviceCuda<T>::resetCols(dev_weights, start_col, n_cols, reset_prob);
}
template <typename T> bool DifferenceRPUDeviceCuda<T>::isInverted() const { return g_plus_ == 0; }
template <typename T> void DifferenceRPUDeviceCuda<T>::invert() {
std::swap(g_plus_, g_minus_);
std::swap(this->dev_reduce_weightening_, this->dev_reduce_weightening_inverted_);
}
template <typename T>
pwukpvec_t<T> DifferenceRPUDeviceCuda<T>::getUpdateKernels(
int m_batch, int nK32, int use_bo64, bool out_trans, const PulsedUpdateMetaParameter<T> &up) {
if (this->rpucuda_device_vec_.size() != 2) {
RPU_FATAL("Expect exactly two devices.");
}
if (getPar().singleDeviceUpdate()) {
RPU_FATAL("Single device update not supported for Difference Device");
}
return VectorRPUDeviceCuda<T>::getUpdateKernels(m_batch, nK32, use_bo64, out_trans, up);
}
template <typename T>
void DifferenceRPUDeviceCuda<T>::runUpdateKernel(
pwukp_t<T> kpars,
CudaContext *up_context,
T *dev_weights,
int m_batch,
const BitLineMaker<T> *blm,
const PulsedUpdateMetaParameter<T> &up,
curandState_t *dev_states,
int one_sided,
uint32_t *x_counts_chunk,
uint32_t *d_counts_chunk) {
// Calling kpars->run(.., this, ..) directly would cause an error
// because the difference device is derived from the abstract device.
DEBUG_OUT("start run update kernel.");
DEBUG_CALL(kpars->print(););
if (one_sided != 0) {
RPU_FATAL("Cannot use one_sided here.");
}
bool same_context = getPar().same_context;
if (!same_context) {
up_context->recordEvent();
}
CudaContext *cp = up_context;
CudaContext *cm = up_context;
if (!same_context) {
cp = &*this->context_vec_[g_plus_];
cm = &*this->context_vec_[g_minus_];
}
int n = kpars->getNStates() / 2; // each device uses different random states
if (!same_context) {
cp->waitEvent(up_context->getEvent());
cm->waitEvent(up_context->getEvent());
}
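// The two one-sided updates below realize a signed update: the device at g_plus_ consumes only
// the positive pulses (one_sided = +1) and the device at g_minus_ only the negative ones
// (one_sided = -1); the visible weight is their weighted difference via reduceToWeights().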
kpars->run(
cp->getStream(), this->dev_weights_ptrs_[g_plus_], m_batch, blm,
static_cast<PulsedRPUDeviceCuda<T> *>(&*this->rpucuda_device_vec_[g_plus_]), // checked above
up, dev_states,
1, // one sided!!
x_counts_chunk, d_counts_chunk);
kpars->run(
cm->getStream(), this->dev_weights_ptrs_[g_minus_], m_batch, blm,
static_cast<PulsedRPUDeviceCuda<T> *>(&*this->rpucuda_device_vec_[g_minus_]), // checked above
up, dev_states,
-1, // one sided!!
x_counts_chunk, d_counts_chunk);
if (!same_context) {
up_context->recordWaitEvent(cp);
up_context->recordWaitEvent(cm);
}
this->reduceToWeights(up_context, dev_weights);
}
template class DifferenceRPUDeviceCuda<float>;
#ifdef RPU_USE_DOUBLE
template class DifferenceRPUDeviceCuda<double>;
#endif
} // namespace RPU
|
d07dd224d18b7ac420c83d690baef12fbe14b86d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <chrono>
#include <cstring>
#include <cstdio>
#include <vector>
#include "device_kernel_wrapper.h"
#include "datatypes.h"
#include "kernel_common.h"
#include "memory_scheduler.h"
__global__
void device_chain_tiled(
return_dt *ret, const anchor_dt *a,
const control_dt *control, score_dt *max_tracker_g, parent_dt *j_tracker_g,
const int max_dist_x, const int max_dist_y, const int bw);
__host__
void device_chain_kernel_wrapper(
std::vector<control_dt> &cont,
std::vector<anchor_dt> &arg,
std::vector<return_dt> &ret,
int max_dist_x, int max_dist_y, int bw)
{
auto batch_count = cont.size() / PE_NUM;
control_dt *h_control;
anchor_dt *h_arg;
return_dt *h_ret;
hipHostMalloc(&h_control, cont.size() * sizeof(control_dt));
hipHostMalloc(&h_arg, arg.size() * sizeof(anchor_dt));
hipHostMalloc(&h_ret, batch_count * TILE_SIZE * PE_NUM * sizeof(return_dt));
ret.resize(batch_count * TILE_SIZE * PE_NUM);
memcpy(h_control, cont.data(), cont.size() * sizeof(control_dt));
memcpy(h_arg, arg.data(), arg.size() * sizeof(anchor_dt));
control_dt *d_control;
anchor_dt *d_arg;
return_dt *d_ret;
score_dt *d_max_tracker;
parent_dt *d_j_tracker;
hipMalloc((void**)&d_control, cont.size() * sizeof(control_dt));
hipMalloc((void**)&d_arg, arg.size() * sizeof(anchor_dt));
hipMalloc((void**)&d_ret, batch_count * TILE_SIZE * PE_NUM * sizeof(return_dt));
hipMalloc((void**)&d_max_tracker, PE_NUM * BACK_SEARCH_COUNT_GPU * sizeof(score_dt));
hipMalloc((void**)&d_j_tracker, PE_NUM * BACK_SEARCH_COUNT_GPU * sizeof(parent_dt));
hipMemcpy(d_control, h_control, cont.size() * sizeof(control_dt), hipMemcpyHostToDevice);
hipMemcpy(d_arg, h_arg, arg.size() * sizeof(anchor_dt), hipMemcpyHostToDevice);
hipDeviceSynchronize();
auto k_start = std::chrono::steady_clock::now();
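// Each batch launches one grid on the default stream; the ret/arg/control pointers are advanced
// by their per-batch strides so every launch processes its own slice of the input.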
for (auto batch = 0; batch < batch_count; batch++) {
hipLaunchKernelGGL(( device_chain_tiled), dim3(BLOCK_NUM), dim3(BACK_SEARCH_COUNT_GPU), 0, 0,
d_ret + batch * PE_NUM * TILE_SIZE,
d_arg + batch * PE_NUM * TILE_SIZE_ACTUAL,
d_control + batch * PE_NUM ,
d_max_tracker,
d_j_tracker,
max_dist_x, max_dist_y, bw);
}
hipDeviceSynchronize();
auto k_end = std::chrono::steady_clock::now();
auto k_time = std::chrono::duration_cast<std::chrono::nanoseconds>(k_end - k_start).count();
printf("Total kernel execution time: %f (s)\n", k_time * 1e-9);
hipMemcpy(h_ret, d_ret, batch_count * TILE_SIZE * PE_NUM * sizeof(return_dt), hipMemcpyDeviceToHost);
hipFree(d_control);
hipFree(d_arg);
hipFree(d_ret);
hipFree(d_max_tracker);
hipFree(d_j_tracker);
memcpy(ret.data(), h_ret, batch_count * TILE_SIZE * PE_NUM * sizeof(return_dt));
}
| d07dd224d18b7ac420c83d690baef12fbe14b86d.cu | #include <chrono>
#include <cstring>
#include <cstdio>
#include <vector>
#include "device_kernel_wrapper.h"
#include "datatypes.h"
#include "kernel_common.h"
#include "memory_scheduler.h"
__global__
void device_chain_tiled(
return_dt *ret, const anchor_dt *a,
const control_dt *control, score_dt *max_tracker_g, parent_dt *j_tracker_g,
const int max_dist_x, const int max_dist_y, const int bw);
__host__
void device_chain_kernel_wrapper(
std::vector<control_dt> &cont,
std::vector<anchor_dt> &arg,
std::vector<return_dt> &ret,
int max_dist_x, int max_dist_y, int bw)
{
auto batch_count = cont.size() / PE_NUM;
control_dt *h_control;
anchor_dt *h_arg;
return_dt *h_ret;
cudaMallocHost(&h_control, cont.size() * sizeof(control_dt));
cudaMallocHost(&h_arg, arg.size() * sizeof(anchor_dt));
cudaMallocHost(&h_ret, batch_count * TILE_SIZE * PE_NUM * sizeof(return_dt));
ret.resize(batch_count * TILE_SIZE * PE_NUM);
memcpy(h_control, cont.data(), cont.size() * sizeof(control_dt));
memcpy(h_arg, arg.data(), arg.size() * sizeof(anchor_dt));
control_dt *d_control;
anchor_dt *d_arg;
return_dt *d_ret;
score_dt *d_max_tracker;
parent_dt *d_j_tracker;
cudaMalloc((void**)&d_control, cont.size() * sizeof(control_dt));
cudaMalloc((void**)&d_arg, arg.size() * sizeof(anchor_dt));
cudaMalloc((void**)&d_ret, batch_count * TILE_SIZE * PE_NUM * sizeof(return_dt));
cudaMalloc((void**)&d_max_tracker, PE_NUM * BACK_SEARCH_COUNT_GPU * sizeof(score_dt));
cudaMalloc((void**)&d_j_tracker, PE_NUM * BACK_SEARCH_COUNT_GPU * sizeof(parent_dt));
cudaMemcpy(d_control, h_control, cont.size() * sizeof(control_dt), cudaMemcpyHostToDevice);
cudaMemcpy(d_arg, h_arg, arg.size() * sizeof(anchor_dt), cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
auto k_start = std::chrono::steady_clock::now();
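// Each batch launches one grid on the default stream; the ret/arg/control pointers are advanced
// by their per-batch strides so every launch processes its own slice of the input.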
for (auto batch = 0; batch < batch_count; batch++) {
device_chain_tiled<<<BLOCK_NUM, BACK_SEARCH_COUNT_GPU>>> (
d_ret + batch * PE_NUM * TILE_SIZE,
d_arg + batch * PE_NUM * TILE_SIZE_ACTUAL,
d_control + batch * PE_NUM ,
d_max_tracker,
d_j_tracker,
max_dist_x, max_dist_y, bw);
}
cudaDeviceSynchronize();
auto k_end = std::chrono::steady_clock::now();
auto k_time = std::chrono::duration_cast<std::chrono::nanoseconds>(k_end - k_start).count();
printf("Total kernel execution time: %f (s)\n", k_time * 1e-9);
cudaMemcpy(h_ret, d_ret, batch_count * TILE_SIZE * PE_NUM * sizeof(return_dt), cudaMemcpyDeviceToHost);
cudaFree(d_control);
cudaFree(d_arg);
cudaFree(d_ret);
cudaFree(d_max_tracker);
cudaFree(d_j_tracker);
memcpy(ret.data(), h_ret, batch_count * TILE_SIZE * PE_NUM * sizeof(return_dt));
}
|
ae96c48b26457f6e424019a862650f22e28fc9c8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Created by przemo on 27.12.2019.
//
#include "ConvSeparateWeightsLayer.h"
namespace NeuralNetworkGPU
{
/*
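* Forward pass kernel: blockIdx.x/y give the output x/y position and threadIdx.x the feature map;
* each output element owns its own filter (hence "separate weights"). The kernel accumulates the
* strided convolution sum over the filter window and input channels, stores it in t_sums, clears
* the element's delta and writes the sigmoid activation to t_output.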
*
*/
__global__ void determineOutputFuncConvSW(float *t_input, TensorSize *t_inputSize,
float *t_output,
float *t_sums,
float *t_weights, MatrixSize *t_filterSize,
float *t_deltas,
float *d_b,
int *d_stride)
{
long index = blockIdx.x + blockIdx.y*gridDim.x + threadIdx.x*gridDim.x*gridDim.y;
//sums x[i]*w[i]
int yFrame = t_inputSize->x*t_inputSize->y;
int yfFrame = t_filterSize->y*t_filterSize->x;
int xOffset = *d_stride*blockIdx.x;
int yOffset = *d_stride*blockIdx.y*t_inputSize->x;
int zfOffset = yfFrame*t_inputSize->z*index;
float sum = 0;
for(int y=0,yf=0,yi=yOffset; y<t_filterSize->y; y++)
{
for(int x=0; x<t_filterSize->x; x++)
{
for(int z=0,zf=zfOffset,zi=0; z<t_inputSize->z; z++)
{
sum += t_input[xOffset+x + yi + zi] * t_weights[x + yf + zf];
zf+=yfFrame;
zi+=yFrame;
}
}
yf+=t_filterSize->x;
yi+=t_inputSize->y;
}
t_sums[index] = sum;
// sum = sum > 255 ? 255 : sum;
// sum = sum < -255 ? -255 : sum;
//reset delta
t_deltas[index] = 0;
//activation function
t_output[index] =
1 / (1 + exp(-(*d_b)*sum) ); //sigmoid function
// sum > 0 ? sum : sum*0.05; //RELU function
}
/*
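* SGD backward pass: evaluates the sigmoid derivative from the stored pre-activation sum, updates
* this output element's private filter with learning rate *d_n, accumulates the propagated deltas
* into t_prevDeltas (when provided) and resets the element's own delta.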
*
*/
__global__ void learnSGDConvSW(float *t_input, TensorSize *t_inputSize,
float *t_output,
float *t_sums,
float *t_weights, MatrixSize *t_filterSize,
float *t_deltas, float *t_prevDeltas,
float *d_n,float *d_b)
{
long index = blockIdx.x + blockIdx.y*gridDim.x + threadIdx.x*gridDim.x*gridDim.y;
float delta = t_deltas[index];
//determine common multiplier
float e = exp(-(*d_b)*t_sums[index]);
float m = 1 + e;
float derivative = ((*d_b)*e/(m*m));
// float derivative = t_sums[index] > 0 ? 1 : 0.05;
float p = (*d_n)* delta * derivative;
//calculate new weights
int yFrame = t_inputSize->x*t_inputSize->y;
int yfFrame = t_filterSize->y*t_filterSize->x;
int yOffset = blockIdx.y*t_inputSize->x;
int zfOffset = yfFrame*t_inputSize->z*index;
for(int y=0,yf=0,yi=yOffset; y<t_filterSize->y; y++)
{
for(int x=0; x<t_filterSize->x; x++)
{
for(int z=0,zf=zfOffset,zi=0; z<t_inputSize->z; z++)
{
t_weights[ x + yf + zf ] -= p*t_input[blockIdx.x+x + yi + zi];
zf+=yfFrame;
zi+=yFrame;
}
}
yf+=t_filterSize->x;
yi+=t_inputSize->y;
}
//set delta to deeper neurons
if(t_prevDeltas != nullptr)
{
float dd = delta*derivative;
for(int y=0,yf=0,yi=yOffset; y<t_filterSize->y; y++)
{
for(int x=0; x<t_filterSize->x; x++)
{
for(int z=0,zf=0,zi=0; z<t_inputSize->z; z++)
{
t_prevDeltas[blockIdx.x+x + yi + zi] += dd * t_weights[ x + yf + zf ];
zf+=yfFrame;
zi+=yFrame;
}
}
yf+=t_filterSize->x;
yi+=t_inputSize->y;
}
}
//reset delta
t_deltas[index] = 0;
}
/*
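* Adam backward pass: per-weight first and second moment estimates (t_m, t_v) are updated as
* exponential moving averages of the gradient and its square, the weights are moved by
* n*m/(sqrt(v)+epsilon), deltas are propagated to the previous layer and the element's delta is reset.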
*
*/
__global__ void learnAdamConvSW(float *t_input, TensorSize *t_inputSize,
float *t_output,
float *t_sums,
float *t_weights, MatrixSize *t_filterSize,
float *t_deltas, float *t_prevDeltas,
float *t_m,float *t_v,
float *t_n,float *t_b,
float *t_B1,float *t_B2,
int *d_stride)
{
long index = blockIdx.x + blockIdx.y*gridDim.x + threadIdx.x*gridDim.x*gridDim.y;
float delta = t_deltas[index];
//determine derivative and gradients
float e = exp(-(*t_b)*t_sums[index]);
float m = 1 + e;
float derivative = ((*t_b)*e/(m*m));
// float sum = t_sums[index];
// float derivative = sum > 0 && sum < 65536 ? 1 : 0.05;
float grad = delta*derivative; // gradient without x factor
float grad2 = grad*grad;
//calculate new weights
int yFrame = t_inputSize->x*t_inputSize->y;
int yfFrame = t_filterSize->y*t_filterSize->x;
int xOffset = *d_stride*blockIdx.x;
int yOffset = *d_stride*blockIdx.y*t_inputSize->x;
int zfOffset = yfFrame*t_inputSize->z*index;
float mTarget,vTarget;
float mNew, vNew;
for(int y=0,yf=0,yi=yOffset; y<t_filterSize->y; y++)
{
for(int x=0; x<t_filterSize->x; x++)
{
for(int z=0,zf=zfOffset,zi=0; z<t_inputSize->z; z++)
{
float input = t_input[xOffset+x + yi + zi];
//calculate new m & v
mTarget = grad*input;
vTarget = grad2*input*input;
mNew = mTarget - (*t_B1)*(mTarget-t_m[x + yf + zf]);
vNew = vTarget - (*t_B2)*(vTarget-t_v[x + yf + zf]);
t_m[x + yf + zf] = mNew;
t_v[x + yf + zf] = vNew;
//update weights
t_weights[x + yf + zf] -= __fdiv_rd ((*t_n)*mNew , (__fsqrt_rd(vNew)+0.0000001));
zf+=yfFrame;
zi+=yFrame;
}
}
yf+=t_filterSize->x;
yi+=t_inputSize->y;
}
//set delta to deeper neurons
if(t_prevDeltas != nullptr)
{
float dd = delta*derivative;
for(int y=0,yf=0,yi=yOffset; y<t_filterSize->y; y++)
{
for(int x=0; x<t_filterSize->x; x++)
{
for(int z=0,zf=zfOffset,zi=0; z<t_inputSize->z; z++)
{
t_prevDeltas[blockIdx.x+x + yi + zi] += dd * t_weights[ x + yf + zf ];
zf+=yfFrame;
zi+=yFrame;
}
}
yf+=t_filterSize->x;
yi+=t_inputSize->y;
}
}
//reset delta
t_deltas[index] = 0;
}
/*
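* Normalization kernel: finds the largest absolute weight of the filter owned by thread threadIdx.x
* and divides that whole filter by it, keeping the per-element filter weights in a bounded range.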
*
*/
__global__ void scaleWeightsConvSW(TensorSize *t_inputSize,
float *t_weights, MatrixSize *t_filterSize)
{
//calculate new weights
int yfFrame = t_filterSize->y*t_filterSize->x;
int zfOffset = yfFrame*t_inputSize->z*threadIdx.x;
float sum = 0;
for(int y=0,yf=0; y<t_filterSize->y; y++)
{
for(int x=0; x<t_filterSize->x; x++)
{
for(int z=0,zf=zfOffset; z<t_inputSize->z; z++)
{
sum = abs(t_weights[ x + yf + zf ]) > sum ? abs(t_weights[ x + yf + zf ]) : sum;
zf+=yfFrame;
}
}
yf+=t_filterSize->x;
}
__fdiv_rd(sum,100);
for(int y=0,yf=0; y<t_filterSize->y; y++)
{
for(int x=0; x<t_filterSize->x; x++)
{
for(int z=0,zf=zfOffset; z<t_inputSize->z; z++)
{
t_weights[ x + yf + zf ] = __fdiv_rd(t_weights[ x + yf + zf ],sum);
zf+=yfFrame;
}
}
yf+=t_filterSize->x;
}
}
/*
*
*/
ConvSeparateWeightsLayer::ConvSeparateWeightsLayer(float t_parameterB, float t_learnRate, int convLayers,
MatrixSize t_filterSize, NeuronsPtr t_prevLayerReference, int t_stride)
{
float b1 = 0.9, b2 = 0.999;
prevLayerId = t_prevLayerReference.id;
size = TensorSize((t_prevLayerReference.tSize.x-t_filterSize.x+1)/t_stride,
(t_prevLayerReference.tSize.y-t_filterSize.y+1)/t_stride,
convLayers);
de_input = t_prevLayerReference.inputPtr;
//learn rate
hipMalloc( (void **) &d_n, sizeof(float));
hipMemcpy(d_n, &(t_learnRate), sizeof(float), hipMemcpyHostToDevice);
//parameter b
hipMalloc( (void **) &d_b, sizeof(float));
hipMemcpy(d_b, &(t_parameterB), sizeof(float), hipMemcpyHostToDevice);
//Adam parameters
hipMalloc( (void **) &d_B1, sizeof(float));
hipMemcpy(d_B1, &(b1), sizeof(float), hipMemcpyHostToDevice);
hipMalloc( (void **) &d_B2, sizeof(float));
hipMemcpy(d_B2, &(b2), sizeof(float), hipMemcpyHostToDevice);
//stride
int stride = t_stride;
hipMalloc( (void **) &d_stride, sizeof(int));
hipMemcpy(d_stride, &(stride), sizeof(int), hipMemcpyHostToDevice);
//input size
hipMalloc( (void **) &d_inputSize, sizeof(TensorSize));
hipMemcpy(d_inputSize, &t_prevLayerReference.tSize, sizeof(TensorSize), hipMemcpyHostToDevice);
inputSize = t_prevLayerReference.tSize;
//output
hipMalloc( (void **) &d_output, sizeof(float)*size.m);
output = (float*) std::malloc(sizeof(float)*size.m);
//filter size
filterSize = t_filterSize;
hipMalloc( (void **) &d_filterSize, sizeof(MatrixSize));
hipMemcpy(d_filterSize, &t_filterSize, sizeof(MatrixSize), hipMemcpyHostToDevice);
//weights
long weightsSize = t_filterSize.m*t_prevLayerReference.tSize.z*size.z*size.y*size.x;
hipMalloc( (void **) &d_weights, sizeof(float)*weightsSize);
initWeights();
//sums
hipMalloc( (void **) &d_sums, sizeof(float)*size.m);
//deltas
hipMalloc( (void **) &d_deltas, sizeof(float)*size.m);
deltas = (float*) malloc(sizeof(float)*size.m);
de_prevDeltas = t_prevLayerReference.deltaPtr;
//adam learn
float *zeros = (float*) malloc(sizeof(float)*weightsSize);
for(int i=0; i<weightsSize; i++) zeros[i] = 0;
hipMalloc( (void **) &d_m, sizeof(float)*weightsSize);
hipMemcpy(d_m, zeros, sizeof(float)*weightsSize, hipMemcpyHostToDevice);
hipMalloc( (void **) &d_v, sizeof(float)*weightsSize);
hipMemcpy(d_v, zeros, sizeof(float)*weightsSize, hipMemcpyHostToDevice);
free(zeros);
}
/*
*
*/
ConvSeparateWeightsLayer::~ConvSeparateWeightsLayer()
{
hipFree(d_n);
hipFree(d_b);
hipFree(d_B1);
hipFree(d_B2);
hipFree(d_inputSize);
hipFree(d_output);
hipFree(d_sums);
hipFree(d_weights);
hipFree(d_filterSize);
hipFree(d_deltas);
hipFree(d_m);
hipFree(d_v);
free(output);
free(deltas);
}
/*
*
*/
void ConvSeparateWeightsLayer::initWeights()
{
long weightsSize = filterSize.m*inputSize.z*size.z*size.y*size.x;
float *randomValues = (float*) malloc(sizeof(float)*weightsSize);
for(int i=0; i< weightsSize; i++)
{
float randomValue = getRandomWeight();
randomValues[i] = randomValue;
}
hipMemcpy(d_weights, randomValues, sizeof(float)*weightsSize, hipMemcpyHostToDevice);
free(randomValues);
}
/*
*
*/
void ConvSeparateWeightsLayer::setWeights(float* t_weights)
{
long weightsSize = filterSize.m*inputSize.z*size.z*size.y*size.x;
hipMemcpy(d_weights, t_weights, sizeof(float)*weightsSize, hipMemcpyHostToDevice);
}
/*
*
*/
void ConvSeparateWeightsLayer::setMomentum1(float* t_momentum)
{
long weightsSize = filterSize.m*inputSize.z*size.z*size.y*size.x;
hipMemcpy(d_m, t_momentum, sizeof(float)*weightsSize, hipMemcpyHostToDevice);
}
/*
*
*/
void ConvSeparateWeightsLayer::setMomentum2(float* t_momentum)
{
long weightsSize = filterSize.m*inputSize.z*size.z*size.y*size.x;
hipMemcpy(d_v, t_momentum, sizeof(float)*weightsSize, hipMemcpyHostToDevice);
}
/*
*
*/
std::vector<double> ConvSeparateWeightsLayer::getOutput()
{
hipMemcpy(output, d_output, sizeof(float)*size.m, hipMemcpyDeviceToHost);
std::vector<double> result;
int outputSize = size.multiply();
for(int i=0; i<outputSize; i++ )
{
double v = output[i];
result.push_back(v);
}
return result;
}
void ConvSeparateWeightsLayer::determineOutput()
{
dim3 threadsPerBlock(size.z);
dim3 numBlocks(size.x, size.y);
hipLaunchKernelGGL(( determineOutputFuncConvSW), dim3(numBlocks) , dim3(threadsPerBlock) , 0, 0, de_input, d_inputSize,
d_output,
d_sums,
d_weights, d_filterSize,
d_deltas,
d_b,
d_stride);
}
void ConvSeparateWeightsLayer::learnSGD()
{
// int64 timeBefore = cv::getTickCount();
dim3 threadsPerBlock(size.z);
dim3 numBlocks(size.x, size.y);
hipLaunchKernelGGL(( learnSGDConvSW), dim3(numBlocks) , dim3(threadsPerBlock) , 0, 0, de_input, d_inputSize,
d_output,
d_sums,
d_weights, d_filterSize,
d_deltas, de_prevDeltas,
d_n, d_b);
// int64 afterBefore = cv::getTickCount();
// std::cout << "Sigm: " << (afterBefore - timeBefore)/ cv::getTickFrequency() << "\n";
}
void ConvSeparateWeightsLayer::learnAdam()
{
dim3 threadsPerBlock(size.z);
dim3 numBlocks(size.x, size.y);
hipLaunchKernelGGL(( learnAdamConvSW), dim3(numBlocks) , dim3(threadsPerBlock) , 0, 0, de_input, d_inputSize,
d_output,
d_sums,
d_weights, d_filterSize,
d_deltas, de_prevDeltas,
d_m, d_v,
d_n, d_b,
d_B1, d_B2,
d_stride);
}
/*
*
*/
NeuronsPtr ConvSeparateWeightsLayer::getNeuronPtr()
{
return NeuronsPtr(layerId, d_output,size, d_deltas);
}
/*
*
*/
void ConvSeparateWeightsLayer::saveToFile(std::ofstream &t_file)
{
t_file << (float) getLayerTypeId() << ' '; //Signature of ConvSeparateWeightsLayer
t_file << (float) prevLayerId << ' '; //Id of previous layer
t_file << (float) inputSize.x << ' ';
t_file << (float) inputSize.y << ' ';
t_file << (float) inputSize.z << ' ';
t_file << (float) size.z << ' ';
t_file << (float) filterSize.x << ' ';
t_file << (float) filterSize.y << ' ';
float learnRate;
hipMemcpy(&learnRate, d_n, sizeof(float), hipMemcpyDeviceToHost);
t_file << learnRate << ' ';
float b;
hipMemcpy(&b, d_b, sizeof(float), hipMemcpyDeviceToHost);
t_file << b << ' ';
int stride;
hipMemcpy(&stride, d_stride, sizeof(int), hipMemcpyDeviceToHost);
t_file << (float) stride << ' ';
//Weights
long weightsSize = filterSize.m*inputSize.z*size.z*size.y*size.x;
float *weights = (float*) malloc(sizeof(float)*weightsSize);
hipMemcpy(weights, d_weights, sizeof(float)*weightsSize, hipMemcpyDeviceToHost);
for(int i=0; i< weightsSize; i++)
{
t_file << weights[i] << ' ';
}
hipMemcpy(weights, d_m, sizeof(float)*weightsSize, hipMemcpyDeviceToHost);
for(int i=0; i< weightsSize; i++)
{
t_file << weights[i] << ' ';
}
hipMemcpy(weights, d_v, sizeof(float)*weightsSize, hipMemcpyDeviceToHost);
for(int i=0; i< weightsSize; i++)
{
t_file << weights[i] << ' ';
}
free(weights);
}
/*
*
*/
ConvSeparateWeightsLayer* ConvSeparateWeightsLayer::loadFromFile(std::ifstream &t_file, std::vector<NeuronsPtr> &t_prevLayerReferences)
{
float filterSize[2], convSize, inputSize[3];
float learnRate, b, stridef;
float prevId;
int stride;
t_file >> prevId;
t_file >> inputSize[0];
t_file >> inputSize[1];
t_file >> inputSize[2];
t_file >> convSize;
t_file >> filterSize[0];
t_file >> filterSize[1];
t_file >> learnRate;
t_file >> b;
t_file >> stridef;
stride = stridef;
ConvSeparateWeightsLayer* layer = new ConvSeparateWeightsLayer(b,
learnRate,
convSize,
MatrixSize(filterSize[0],filterSize[1]),
t_prevLayerReferences[(int)prevId],
stride);
long weightsSize = filterSize[0]*filterSize[1]*inputSize[2]*convSize
*((int)((inputSize[0]-filterSize[0]+1)/stride)) //size.x
*((int)((inputSize[1]-filterSize[1]+1)/stride)); //size.y
float *weights = (float*) malloc(sizeof(float)*weightsSize);
float buff;
for(int i=0; i<weightsSize; i++)
{
t_file >> buff;
weights[i] = buff;
}
layer->setWeights(weights);
for(int i=0; i<weightsSize; i++)
{
t_file >> buff;
weights[i] = buff;
}
layer->setMomentum1(weights);
for(int i=0; i<weightsSize; i++)
{
t_file >> buff;
weights[i] = buff;
}
layer->setMomentum2(weights);
free(weights);
return layer;
}
/*
*
*/
void ConvSeparateWeightsLayer::drawLayer()
{
std::vector<double> output = getOutput();
for(int z=0; z<size.z; z++)
{
cv::Mat image = cv::Mat(size.y, size.x, CV_8UC3);
for(int y=0; y<size.y; y++)
{
for(int x=0; x<size.x; x++)
{
uchar* ptrDst = image.ptr(y)+(x+x+x);
int src = output[z*size.x*size.y + y*size.x + x]*255;
ptrDst[0] = src;
ptrDst[1] = src;
ptrDst[2] = src;
}
}
cv::resize(image, image, cv::Size(), 8, 8,cv::INTER_CUBIC);
//Print
imshow(std::to_string(z), image);
cv::waitKey(3);
}
}
/*
*
*/
void ConvSeparateWeightsLayer::printInfo()
{
TensorSize prevTSize;
hipMemcpy(&prevTSize, d_inputSize, sizeof(TensorSize), hipMemcpyDeviceToHost);
int weightsSize = filterSize.m*prevTSize.z*size.z*size.y*size.x;
std::cout << " (" << layerId << ") ConvSep <-- " << prevLayerId << " : ";
std::cout << prevTSize.x << "x" << prevTSize.y << "x" << prevTSize.z << " -> " << size.x << "x" << size.y << "x" << size.z;
std::cout << " w:" << weightsSize << "\n";
}
}
| ae96c48b26457f6e424019a862650f22e28fc9c8.cu | //
// Created by przemo on 27.12.2019.
//
#include "ConvSeparateWeightsLayer.h"
namespace NeuralNetworkGPU
{
/*
*
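	 * Forward-pass kernel: one thread per output element (blockIdx.x/y give the
	 * spatial position, threadIdx.x the output channel). It accumulates the weighted
	 * sum over the filter window and all input channels, stores the raw sum in
	 * t_sums, resets the delta, and writes the sigmoid activation to t_output.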
*/
__global__ void determineOutputFuncConvSW(float *t_input, TensorSize *t_inputSize,
float *t_output,
float *t_sums,
float *t_weights, MatrixSize *t_filterSize,
float *t_deltas,
float *d_b,
int *d_stride)
{
long index = blockIdx.x + blockIdx.y*gridDim.x + threadIdx.x*gridDim.x*gridDim.y;
//sums x[i]*w[i]
int yFrame = t_inputSize->x*t_inputSize->y;
int yfFrame = t_filterSize->y*t_filterSize->x;
int xOffset = *d_stride*blockIdx.x;
int yOffset = *d_stride*blockIdx.y*t_inputSize->x;
int zfOffset = yfFrame*t_inputSize->z*index;
float sum = 0;
for(int y=0,yf=0,yi=yOffset; y<t_filterSize->y; y++)
{
for(int x=0; x<t_filterSize->x; x++)
{
for(int z=0,zf=zfOffset,zi=0; z<t_inputSize->z; z++)
{
sum += t_input[xOffset+x + yi + zi] * t_weights[x + yf + zf];
zf+=yfFrame;
zi+=yFrame;
}
}
yf+=t_filterSize->x;
yi+=t_inputSize->y;
}
t_sums[index] = sum;
// sum = sum > 255 ? 255 : sum;
// sum = sum < -255 ? -255 : sum;
//reset delta
t_deltas[index] = 0;
//activation function
t_output[index] =
1 / (1 + exp(-(*d_b)*sum) ); //sigmoid function
// sum > 0 ? sum : sum*0.05; //RELU function
}
/*
*
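	 * SGD backward kernel: recomputes the sigmoid derivative from the stored
	 * pre-activation sum, subtracts learnRate * delta * derivative * input from
	 * each weight of this output element's filter, accumulates the propagated
	 * delta into the previous layer (if present), and resets the local delta.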
*/
__global__ void learnSGDConvSW(float *t_input, TensorSize *t_inputSize,
float *t_output,
float *t_sums,
float *t_weights, MatrixSize *t_filterSize,
float *t_deltas, float *t_prevDeltas,
float *d_n,float *d_b)
{
long index = blockIdx.x + blockIdx.y*gridDim.x + threadIdx.x*gridDim.x*gridDim.y;
float delta = t_deltas[index];
//determine common multiplier
float e = exp(-(*d_b)*t_sums[index]);
float m = 1 + e;
float derivative = ((*d_b)*e/(m*m));
// float derivative = t_sums[index] > 0 ? 1 : 0.05;
float p = (*d_n)* delta * derivative;
//calculate new weights
int yFrame = t_inputSize->x*t_inputSize->y;
int yfFrame = t_filterSize->y*t_filterSize->x;
int yOffset = blockIdx.y*t_inputSize->x;
int zfOffset = yfFrame*t_inputSize->z*index;
for(int y=0,yf=0,yi=yOffset; y<t_filterSize->y; y++)
{
for(int x=0; x<t_filterSize->x; x++)
{
for(int z=0,zf=zfOffset,zi=0; z<t_inputSize->z; z++)
{
t_weights[ x + yf + zf ] -= p*t_input[blockIdx.x+x + yi + zi];
zf+=yfFrame;
zi+=yFrame;
}
}
yf+=t_filterSize->x;
yi+=t_inputSize->y;
}
//set delta to deeper neurons
if(t_prevDeltas != nullptr)
{
float dd = delta*derivative;
for(int y=0,yf=0,yi=yOffset; y<t_filterSize->y; y++)
{
for(int x=0; x<t_filterSize->x; x++)
{
for(int z=0,zf=0,zi=0; z<t_inputSize->z; z++)
{
t_prevDeltas[blockIdx.x+x + yi + zi] += dd * t_weights[ x + yf + zf ];
zf+=yfFrame;
zi+=yFrame;
}
}
yf+=t_filterSize->x;
yi+=t_inputSize->y;
}
}
//reset delta
t_deltas[index] = 0;
}
/*
*
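	 * Adam backward kernel: keeps per-weight exponential moving averages of the
	 * gradient (t_m, decay B1) and of the squared gradient (t_v, decay B2), then
	 * applies w -= n * m / (sqrt(v) + eps) without bias correction; deltas are
	 * propagated to the previous layer and reset, as in the SGD variant.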
*/
__global__ void learnAdamConvSW(float *t_input, TensorSize *t_inputSize,
float *t_output,
float *t_sums,
float *t_weights, MatrixSize *t_filterSize,
float *t_deltas, float *t_prevDeltas,
float *t_m,float *t_v,
float *t_n,float *t_b,
float *t_B1,float *t_B2,
int *d_stride)
{
long index = blockIdx.x + blockIdx.y*gridDim.x + threadIdx.x*gridDim.x*gridDim.y;
float delta = t_deltas[index];
//determine derivative and gradients
float e = exp(-(*t_b)*t_sums[index]);
float m = 1 + e;
float derivative = ((*t_b)*e/(m*m));
// float sum = t_sums[index];
// float derivative = sum > 0 && sum < 65536 ? 1 : 0.05;
float grad = delta*derivative; // gradient without x factor
float grad2 = grad*grad;
//calculate new weights
int yFrame = t_inputSize->x*t_inputSize->y;
int yfFrame = t_filterSize->y*t_filterSize->x;
int xOffset = *d_stride*blockIdx.x;
int yOffset = *d_stride*blockIdx.y*t_inputSize->x;
int zfOffset = yfFrame*t_inputSize->z*index;
float mTarget,vTarget;
float mNew, vNew;
for(int y=0,yf=0,yi=yOffset; y<t_filterSize->y; y++)
{
for(int x=0; x<t_filterSize->x; x++)
{
for(int z=0,zf=zfOffset,zi=0; z<t_inputSize->z; z++)
{
float input = t_input[xOffset+x + yi + zi];
//calculate new m & v
mTarget = grad*input;
vTarget = grad2*input*input;
mNew = mTarget - (*t_B1)*(mTarget-t_m[x + yf + zf]);
vNew = vTarget - (*t_B2)*(vTarget-t_v[x + yf + zf]);
t_m[x + yf + zf] = mNew;
t_v[x + yf + zf] = vNew;
//update weights
t_weights[x + yf + zf] -= __fdiv_rd ((*t_n)*mNew , (__fsqrt_rd(vNew)+0.0000001));
zf+=yfFrame;
zi+=yFrame;
}
}
yf+=t_filterSize->x;
yi+=t_inputSize->y;
}
//set delta to deeper neurons
if(t_prevDeltas != nullptr)
{
float dd = delta*derivative;
for(int y=0,yf=0,yi=yOffset; y<t_filterSize->y; y++)
{
for(int x=0; x<t_filterSize->x; x++)
{
for(int z=0,zf=zfOffset,zi=0; z<t_inputSize->z; z++)
{
t_prevDeltas[blockIdx.x+x + yi + zi] += dd * t_weights[ x + yf + zf ];
zf+=yfFrame;
zi+=yFrame;
}
}
yf+=t_filterSize->x;
yi+=t_inputSize->y;
}
}
//reset delta
t_deltas[index] = 0;
}
/*
*
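	 * Rescales every weight of one filter (selected by threadIdx.x) by the largest
	 * absolute weight found in it; note that the result of __fdiv_rd(sum,100) is
	 * discarded, so the divisor is the raw maximum.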
*/
__global__ void scaleWeightsConvSW(TensorSize *t_inputSize,
float *t_weights, MatrixSize *t_filterSize)
{
//calculate new weights
int yfFrame = t_filterSize->y*t_filterSize->x;
int zfOffset = yfFrame*t_inputSize->z*threadIdx.x;
float sum = 0;
for(int y=0,yf=0; y<t_filterSize->y; y++)
{
for(int x=0; x<t_filterSize->x; x++)
{
for(int z=0,zf=zfOffset; z<t_inputSize->z; z++)
{
sum = abs(t_weights[ x + yf + zf ]) > sum ? abs(t_weights[ x + yf + zf ]) : sum;
zf+=yfFrame;
}
}
yf+=t_filterSize->x;
}
__fdiv_rd(sum,100);
for(int y=0,yf=0; y<t_filterSize->y; y++)
{
for(int x=0; x<t_filterSize->x; x++)
{
for(int z=0,zf=zfOffset; z<t_inputSize->z; z++)
{
t_weights[ x + yf + zf ] = __fdiv_rd(t_weights[ x + yf + zf ],sum);
zf+=yfFrame;
}
}
yf+=t_filterSize->x;
}
}
/*
*
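	 * Allocates the device-side state of the layer: hyper-parameters (learn rate,
	 * sigmoid slope b, Adam decay rates B1/B2, stride), the output/sum/delta
	 * buffers, the randomly initialised weights, and the zero-initialised Adam
	 * moment buffers d_m/d_v.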
*/
ConvSeparateWeightsLayer::ConvSeparateWeightsLayer(float t_parameterB, float t_learnRate, int convLayers,
MatrixSize t_filterSize, NeuronsPtr t_prevLayerReference, int t_stride)
{
float b1 = 0.9, b2 = 0.999;
prevLayerId = t_prevLayerReference.id;
size = TensorSize((t_prevLayerReference.tSize.x-t_filterSize.x+1)/t_stride,
(t_prevLayerReference.tSize.y-t_filterSize.y+1)/t_stride,
convLayers);
de_input = t_prevLayerReference.inputPtr;
//learn rate
cudaMalloc( (void **) &d_n, sizeof(float));
cudaMemcpy(d_n, &(t_learnRate), sizeof(float), cudaMemcpyHostToDevice);
//parameter b
cudaMalloc( (void **) &d_b, sizeof(float));
cudaMemcpy(d_b, &(t_parameterB), sizeof(float), cudaMemcpyHostToDevice);
//Adam parameters
cudaMalloc( (void **) &d_B1, sizeof(float));
cudaMemcpy(d_B1, &(b1), sizeof(float), cudaMemcpyHostToDevice);
cudaMalloc( (void **) &d_B2, sizeof(float));
cudaMemcpy(d_B2, &(b2), sizeof(float), cudaMemcpyHostToDevice);
//stride
int stride = t_stride;
cudaMalloc( (void **) &d_stride, sizeof(int));
cudaMemcpy(d_stride, &(stride), sizeof(int), cudaMemcpyHostToDevice);
//input size
cudaMalloc( (void **) &d_inputSize, sizeof(TensorSize));
cudaMemcpy(d_inputSize, &t_prevLayerReference.tSize, sizeof(TensorSize), cudaMemcpyHostToDevice);
inputSize = t_prevLayerReference.tSize;
//output
cudaMalloc( (void **) &d_output, sizeof(float)*size.m);
output = (float*) std::malloc(sizeof(float)*size.m);
//filter size
filterSize = t_filterSize;
cudaMalloc( (void **) &d_filterSize, sizeof(MatrixSize));
cudaMemcpy(d_filterSize, &t_filterSize, sizeof(MatrixSize), cudaMemcpyHostToDevice);
//weights
long weightsSize = t_filterSize.m*t_prevLayerReference.tSize.z*size.z*size.y*size.x;
cudaMalloc( (void **) &d_weights, sizeof(float)*weightsSize);
initWeights();
//sums
cudaMalloc( (void **) &d_sums, sizeof(float)*size.m);
//deltas
cudaMalloc( (void **) &d_deltas, sizeof(float)*size.m);
deltas = (float*) malloc(sizeof(float)*size.m);
de_prevDeltas = t_prevLayerReference.deltaPtr;
//adam learn
float *zeros = (float*) malloc(sizeof(float)*weightsSize);
for(int i=0; i<weightsSize; i++) zeros[i] = 0;
cudaMalloc( (void **) &d_m, sizeof(float)*weightsSize);
cudaMemcpy(d_m, zeros, sizeof(float)*weightsSize, cudaMemcpyHostToDevice);
cudaMalloc( (void **) &d_v, sizeof(float)*weightsSize);
cudaMemcpy(d_v, zeros, sizeof(float)*weightsSize, cudaMemcpyHostToDevice);
free(zeros);
}
/*
*
*/
ConvSeparateWeightsLayer::~ConvSeparateWeightsLayer()
{
cudaFree(d_n);
cudaFree(d_b);
cudaFree(d_B1);
cudaFree(d_B2);
cudaFree(d_inputSize);
cudaFree(d_output);
cudaFree(d_sums);
cudaFree(d_weights);
cudaFree(d_filterSize);
cudaFree(d_deltas);
cudaFree(d_m);
cudaFree(d_v);
free(output);
free(deltas);
}
/*
*
*/
void ConvSeparateWeightsLayer::initWeights()
{
long weightsSize = filterSize.m*inputSize.z*size.z*size.y*size.x;
float *randomValues = (float*) malloc(sizeof(float)*weightsSize);
for(int i=0; i< weightsSize; i++)
{
float randomValue = getRandomWeight();
randomValues[i] = randomValue;
}
cudaMemcpy(d_weights, randomValues, sizeof(float)*weightsSize, cudaMemcpyHostToDevice);
free(randomValues);
}
/*
*
*/
void ConvSeparateWeightsLayer::setWeights(float* t_weights)
{
long weightsSize = filterSize.m*inputSize.z*size.z*size.y*size.x;
cudaMemcpy(d_weights, t_weights, sizeof(float)*weightsSize, cudaMemcpyHostToDevice);
}
/*
*
*/
void ConvSeparateWeightsLayer::setMomentum1(float* t_momentum)
{
long weightsSize = filterSize.m*inputSize.z*size.z*size.y*size.x;
cudaMemcpy(d_m, t_momentum, sizeof(float)*weightsSize, cudaMemcpyHostToDevice);
}
/*
*
*/
void ConvSeparateWeightsLayer::setMomentum2(float* t_momentum)
{
long weightsSize = filterSize.m*inputSize.z*size.z*size.y*size.x;
cudaMemcpy(d_v, t_momentum, sizeof(float)*weightsSize, cudaMemcpyHostToDevice);
}
/*
*
*/
std::vector<double> ConvSeparateWeightsLayer::getOutput()
{
cudaMemcpy(output, d_output, sizeof(float)*size.m, cudaMemcpyDeviceToHost);
std::vector<double> result;
int outputSize = size.multiply();
for(int i=0; i<outputSize; i++ )
{
double v = output[i];
result.push_back(v);
}
return result;
}
void ConvSeparateWeightsLayer::determineOutput()
{
dim3 threadsPerBlock(size.z);
dim3 numBlocks(size.x, size.y);
determineOutputFuncConvSW<<< numBlocks , threadsPerBlock >>>(de_input, d_inputSize,
d_output,
d_sums,
d_weights, d_filterSize,
d_deltas,
d_b,
d_stride);
}
void ConvSeparateWeightsLayer::learnSGD()
{
// int64 timeBefore = cv::getTickCount();
dim3 threadsPerBlock(size.z);
dim3 numBlocks(size.x, size.y);
learnSGDConvSW<<< numBlocks , threadsPerBlock >>>(de_input, d_inputSize,
d_output,
d_sums,
d_weights, d_filterSize,
d_deltas, de_prevDeltas,
d_n, d_b);
// int64 afterBefore = cv::getTickCount();
// std::cout << "Sigm: " << (afterBefore - timeBefore)/ cv::getTickFrequency() << "\n";
}
void ConvSeparateWeightsLayer::learnAdam()
{
dim3 threadsPerBlock(size.z);
dim3 numBlocks(size.x, size.y);
learnAdamConvSW<<< numBlocks , threadsPerBlock >>>(de_input, d_inputSize,
d_output,
d_sums,
d_weights, d_filterSize,
d_deltas, de_prevDeltas,
d_m, d_v,
d_n, d_b,
d_B1, d_B2,
d_stride);
}
/*
*
*/
NeuronsPtr ConvSeparateWeightsLayer::getNeuronPtr()
{
return NeuronsPtr(layerId, d_output,size, d_deltas);
}
/*
*
*/
void ConvSeparateWeightsLayer::saveToFile(std::ofstream &t_file)
{
	t_file << (float) getLayerTypeId() << ' '; //Signature of ConvSeparateWeightsLayer
t_file << (float) prevLayerId << ' '; //Id of previous layer
t_file << (float) inputSize.x << ' ';
t_file << (float) inputSize.y << ' ';
t_file << (float) inputSize.z << ' ';
t_file << (float) size.z << ' ';
t_file << (float) filterSize.x << ' ';
t_file << (float) filterSize.y << ' ';
float learnRate;
cudaMemcpy(&learnRate, d_n, sizeof(float), cudaMemcpyDeviceToHost);
t_file << learnRate << ' ';
float b;
cudaMemcpy(&b, d_b, sizeof(float), cudaMemcpyDeviceToHost);
t_file << b << ' ';
int stride;
cudaMemcpy(&stride, d_stride, sizeof(int), cudaMemcpyDeviceToHost);
t_file << (float) stride << ' ';
//Weights
long weightsSize = filterSize.m*inputSize.z*size.z*size.y*size.x;
float *weights = (float*) malloc(sizeof(float)*weightsSize);
cudaMemcpy(weights, d_weights, sizeof(float)*weightsSize, cudaMemcpyDeviceToHost);
for(int i=0; i< weightsSize; i++)
{
t_file << weights[i] << ' ';
}
cudaMemcpy(weights, d_m, sizeof(float)*weightsSize, cudaMemcpyDeviceToHost);
for(int i=0; i< weightsSize; i++)
{
t_file << weights[i] << ' ';
}
cudaMemcpy(weights, d_v, sizeof(float)*weightsSize, cudaMemcpyDeviceToHost);
for(int i=0; i< weightsSize; i++)
{
t_file << weights[i] << ' ';
}
free(weights);
}
/*
*
*/
ConvSeparateWeightsLayer* ConvSeparateWeightsLayer::loadFromFile(std::ifstream &t_file, std::vector<NeuronsPtr> &t_prevLayerReferences)
{
float filterSize[2], convSize, inputSize[3];
float learnRate, b, stridef;
float prevId;
int stride;
t_file >> prevId;
t_file >> inputSize[0];
t_file >> inputSize[1];
t_file >> inputSize[2];
t_file >> convSize;
t_file >> filterSize[0];
t_file >> filterSize[1];
t_file >> learnRate;
t_file >> b;
t_file >> stridef;
stride = stridef;
ConvSeparateWeightsLayer* layer = new ConvSeparateWeightsLayer(b,
learnRate,
convSize,
MatrixSize(filterSize[0],filterSize[1]),
t_prevLayerReferences[(int)prevId],
stride);
long weightsSize = filterSize[0]*filterSize[1]*inputSize[2]*convSize
*((int)((inputSize[0]-filterSize[0]+1)/stride)) //size.x
*((int)((inputSize[1]-filterSize[1]+1)/stride)); //size.y
float *weights = (float*) malloc(sizeof(float)*weightsSize);
float buff;
for(int i=0; i<weightsSize; i++)
{
t_file >> buff;
weights[i] = buff;
}
layer->setWeights(weights);
for(int i=0; i<weightsSize; i++)
{
t_file >> buff;
weights[i] = buff;
}
layer->setMomentum1(weights);
for(int i=0; i<weightsSize; i++)
{
t_file >> buff;
weights[i] = buff;
}
layer->setMomentum2(weights);
free(weights);
return layer;
}
/*
*
*/
void ConvSeparateWeightsLayer::drawLayer()
{
std::vector<double> output = getOutput();
for(int z=0; z<size.z; z++)
{
cv::Mat image = cv::Mat(size.y, size.x, CV_8UC3);
for(int y=0; y<size.y; y++)
{
for(int x=0; x<size.x; x++)
{
uchar* ptrDst = image.ptr(y)+(x+x+x);
int src = output[z*size.x*size.y + y*size.x + x]*255;
ptrDst[0] = src;
ptrDst[1] = src;
ptrDst[2] = src;
}
}
cv::resize(image, image, cv::Size(), 8, 8,cv::INTER_CUBIC);
//Print
imshow(std::to_string(z), image);
cv::waitKey(3);
}
}
/*
*
*/
void ConvSeparateWeightsLayer::printInfo()
{
TensorSize prevTSize;
cudaMemcpy(&prevTSize, d_inputSize, sizeof(TensorSize), cudaMemcpyDeviceToHost);
int weightsSize = filterSize.m*prevTSize.z*size.z*size.y*size.x;
std::cout << " (" << layerId << ") ConvSep <-- " << prevLayerId << " : ";
std::cout << prevTSize.x << "x" << prevTSize.y << "x" << prevTSize.z << " -> " << size.x << "x" << size.y << "x" << size.z;
std::cout << " w:" << weightsSize << "\n";
}
}
|
d1400394a54a9874152290a074a0770c4e0c2bcc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include <iostream>
#include "caffe/layers/conv_layer.hpp"
#include "caffe/common.cuh"
#include "caffe/proto/caffe.pb.h"
#include "caffe/util/math_functions.hpp"
#include "caffe/layers/neuron_layer.hpp"
using namespace std;
namespace caffe {
template <typename Dtype>
void ConvolutionLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* weight = this->blobs_[0]->gpu_data();
const Dtype* bottom_dilation = NULL;
if (bottom.size() == 2) {
bottom_dilation = bottom[1]->gpu_data();
}
  // Here, we modify original caffe to receive only one data input; the second, optional input is the dilation matrix.
for (int i = 0; i < 1; ++i) {
const Dtype* bottom_data = bottom[i]->gpu_data();
Dtype* top_data = top[i]->mutable_gpu_data();
if (bottom.size() == 2) {
for (int n = 0; n < this->num_; ++n) {
this->forward_gpu_dynamic_dilation_gemm(bottom_data + n * this->bottom_dim_, bottom_dilation + bottom[1]->offset(n), weight,
top_data + n * this->top_dim_);
if (this->bias_term_) {
const Dtype* bias = this->blobs_[1]->gpu_data();
this->forward_gpu_bias(top_data + n * this->top_dim_, bias);
}
}
} else {
for (int n = 0; n < this->num_; ++n) {
this->forward_gpu_gemm(bottom_data + n * this->bottom_dim_, weight,
top_data + n * this->top_dim_);
if (this->bias_term_) {
const Dtype* bias = this->blobs_[1]->gpu_data();
this->forward_gpu_bias(top_data + n * this->top_dim_, bias);
}
}
}
}
}
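// Accumulates data_cc[index] * diff_top[index] into the per-pixel dilation
// gradient map; atomicAdd is required because every output channel of the same
// spatial location writes to the same bottom_diff entry.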
template <typename Dtype>
__global__ void DilationBackward(const unsigned int nthreads,
const Dtype* data_cc, const Dtype* diff_top,
const int output_num, const int height, const int width,
Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
int w = (index) % width;
int h = (index / width) % height;
atomicAdd(&bottom_diff[h * width + w], data_cc[index] * diff_top[index]);
}
}
template <typename Dtype>
void ConvolutionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* weight = this->blobs_[0]->gpu_data();
Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff();
const Dtype* bottom_dilation = NULL;
Dtype* bottom_dilation_diff = NULL;
int height_ = this->output_shape_[0];
int width_ = this->output_shape_[1];
if (bottom.size() == 2) {
bottom_dilation = bottom[1]->gpu_data();
bottom_dilation_diff = bottom[1]->mutable_gpu_diff();
caffe_gpu_set(this->num_ * height_ * width_, Dtype(0), bottom_dilation_diff);
}
for (int i = 0; i < 1; ++i) {
const Dtype* top_diff = top[i]->gpu_diff();
const Dtype* bottom_data = bottom[i]->gpu_data();
Dtype* bottom_diff = bottom[i]->mutable_gpu_diff();
// Bias gradient, if necessary.
if (this->bias_term_ && this->param_propagate_down_[1]) {
Dtype* bias_diff = this->blobs_[1]->mutable_gpu_diff();
for (int n = 0; n < this->num_; ++n) {
this->backward_gpu_bias(bias_diff, top_diff + n * this->top_dim_);
}
}
if (this->param_propagate_down_[0] || propagate_down[i] || this->force_back_propagation_) {
if (bottom.size() == 2) {
for (int n = 0; n < this->num_; ++n) {
// gradient w.r.t. weight. Note that we will accumulate diffs.
if (this->param_propagate_down_[0]) {
this->weight_gpu_dynamic_dilation_gemm(bottom_data + n * this->bottom_dim_, bottom_dilation + bottom[1]->offset(n),
top_diff + n * this->top_dim_, weight_diff);
}
// gradient w.r.t. bottom data, if necessary.
if (propagate_down[i] || this->force_back_propagation_) {
this->backward_gpu_dynamic_dilation_gemm(top_diff + n * this->top_dim_, bottom_dilation + bottom[1]->offset(n), weight,
bottom_diff + n * this->bottom_dim_);
int kernel_h_ = this->kernel_shape_data[0];
int kernel_w_ = this->kernel_shape_data[1];
int stride_h_ = this->stride_data[0];
int stride_w_ = this->stride_data[1];
int pad_h_ = this->pad_data[0];
int pad_w_ = this->pad_data[1];
const int kernel_h_eff = kernel_h_ + (kernel_h_ - 1) * (pad_h_ - 1);
const int kernel_w_eff = kernel_w_ + (kernel_w_ - 1) * (pad_w_ - 1);
int height_col = (height_ + 2 * pad_h_ - kernel_h_eff) / stride_h_ + 1;
int width_col = (width_ + 2 * pad_w_ - kernel_w_eff) / stride_w_ + 1;
this->backward_gpu_dynamic_dilation_col2im_gemm(top_diff + n * this->top_dim_, bottom_data + bottom[0]->offset(n), bottom_dilation + bottom[1]->offset(n), weight,
bottom_dilation_diff + bottom[1]->offset(n));
}
}
} else {
for (int n = 0; n < this->num_; ++n) {
// gradient w.r.t. weight. Note that we will accumulate diffs.
if (this->param_propagate_down_[0]) {
this->weight_gpu_gemm(bottom_data + n * this->bottom_dim_,
top_diff + n * this->top_dim_, weight_diff);
}
// gradient w.r.t. bottom data, if necessary.
if (propagate_down[i] || this->force_back_propagation_) {
this->backward_gpu_gemm(top_diff + n * this->top_dim_, weight,
bottom_diff + n * this->bottom_dim_);
}
}
}
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(ConvolutionLayer);
} // namespace caffe
| d1400394a54a9874152290a074a0770c4e0c2bcc.cu | #include <vector>
#include <iostream>
#include "caffe/layers/conv_layer.hpp"
#include "caffe/common.cuh"
#include "caffe/proto/caffe.pb.h"
#include "caffe/util/math_functions.hpp"
#include "caffe/layers/neuron_layer.hpp"
using namespace std;
namespace caffe {
template <typename Dtype>
void ConvolutionLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* weight = this->blobs_[0]->gpu_data();
const Dtype* bottom_dilation = NULL;
if (bottom.size() == 2) {
bottom_dilation = bottom[1]->gpu_data();
}
  // Here, we modify original caffe to receive only one data input; the second, optional input is the dilation matrix.
for (int i = 0; i < 1; ++i) {
const Dtype* bottom_data = bottom[i]->gpu_data();
Dtype* top_data = top[i]->mutable_gpu_data();
if (bottom.size() == 2) {
for (int n = 0; n < this->num_; ++n) {
this->forward_gpu_dynamic_dilation_gemm(bottom_data + n * this->bottom_dim_, bottom_dilation + bottom[1]->offset(n), weight,
top_data + n * this->top_dim_);
if (this->bias_term_) {
const Dtype* bias = this->blobs_[1]->gpu_data();
this->forward_gpu_bias(top_data + n * this->top_dim_, bias);
}
}
} else {
for (int n = 0; n < this->num_; ++n) {
this->forward_gpu_gemm(bottom_data + n * this->bottom_dim_, weight,
top_data + n * this->top_dim_);
if (this->bias_term_) {
const Dtype* bias = this->blobs_[1]->gpu_data();
this->forward_gpu_bias(top_data + n * this->top_dim_, bias);
}
}
}
}
}
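// Accumulates data_cc[index] * diff_top[index] into the per-pixel dilation
// gradient map; atomicAdd is required because every output channel of the same
// spatial location writes to the same bottom_diff entry.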
template <typename Dtype>
__global__ void DilationBackward(const unsigned int nthreads,
const Dtype* data_cc, const Dtype* diff_top,
const int output_num, const int height, const int width,
Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
int w = (index) % width;
int h = (index / width) % height;
atomicAdd(&bottom_diff[h * width + w], data_cc[index] * diff_top[index]);
}
}
template <typename Dtype>
void ConvolutionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* weight = this->blobs_[0]->gpu_data();
Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff();
const Dtype* bottom_dilation = NULL;
Dtype* bottom_dilation_diff = NULL;
int height_ = this->output_shape_[0];
int width_ = this->output_shape_[1];
if (bottom.size() == 2) {
bottom_dilation = bottom[1]->gpu_data();
bottom_dilation_diff = bottom[1]->mutable_gpu_diff();
caffe_gpu_set(this->num_ * height_ * width_, Dtype(0), bottom_dilation_diff);
}
for (int i = 0; i < 1; ++i) {
const Dtype* top_diff = top[i]->gpu_diff();
const Dtype* bottom_data = bottom[i]->gpu_data();
Dtype* bottom_diff = bottom[i]->mutable_gpu_diff();
// Bias gradient, if necessary.
if (this->bias_term_ && this->param_propagate_down_[1]) {
Dtype* bias_diff = this->blobs_[1]->mutable_gpu_diff();
for (int n = 0; n < this->num_; ++n) {
this->backward_gpu_bias(bias_diff, top_diff + n * this->top_dim_);
}
}
if (this->param_propagate_down_[0] || propagate_down[i] || this->force_back_propagation_) {
if (bottom.size() == 2) {
for (int n = 0; n < this->num_; ++n) {
// gradient w.r.t. weight. Note that we will accumulate diffs.
if (this->param_propagate_down_[0]) {
this->weight_gpu_dynamic_dilation_gemm(bottom_data + n * this->bottom_dim_, bottom_dilation + bottom[1]->offset(n),
top_diff + n * this->top_dim_, weight_diff);
}
// gradient w.r.t. bottom data, if necessary.
if (propagate_down[i] || this->force_back_propagation_) {
this->backward_gpu_dynamic_dilation_gemm(top_diff + n * this->top_dim_, bottom_dilation + bottom[1]->offset(n), weight,
bottom_diff + n * this->bottom_dim_);
int kernel_h_ = this->kernel_shape_data[0];
int kernel_w_ = this->kernel_shape_data[1];
int stride_h_ = this->stride_data[0];
int stride_w_ = this->stride_data[1];
int pad_h_ = this->pad_data[0];
int pad_w_ = this->pad_data[1];
const int kernel_h_eff = kernel_h_ + (kernel_h_ - 1) * (pad_h_ - 1);
const int kernel_w_eff = kernel_w_ + (kernel_w_ - 1) * (pad_w_ - 1);
int height_col = (height_ + 2 * pad_h_ - kernel_h_eff) / stride_h_ + 1;
int width_col = (width_ + 2 * pad_w_ - kernel_w_eff) / stride_w_ + 1;
this->backward_gpu_dynamic_dilation_col2im_gemm(top_diff + n * this->top_dim_, bottom_data + bottom[0]->offset(n), bottom_dilation + bottom[1]->offset(n), weight,
bottom_dilation_diff + bottom[1]->offset(n));
}
}
} else {
for (int n = 0; n < this->num_; ++n) {
// gradient w.r.t. weight. Note that we will accumulate diffs.
if (this->param_propagate_down_[0]) {
this->weight_gpu_gemm(bottom_data + n * this->bottom_dim_,
top_diff + n * this->top_dim_, weight_diff);
}
// gradient w.r.t. bottom data, if necessary.
if (propagate_down[i] || this->force_back_propagation_) {
this->backward_gpu_gemm(top_diff + n * this->top_dim_, weight,
bottom_diff + n * this->bottom_dim_);
}
}
}
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(ConvolutionLayer);
} // namespace caffe
|
3ad4adba56d3fbb883bca9e548741b11f780a19b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright 2021 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include "cuda_help.h"
#include "eye.h"
#include "proj.h"
using namespace Legion;
namespace legate {
namespace numpy {
template<typename T>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
legate_eye(const AccessorRW<T, 2> out, const Point<2> start, const size_t max) {
const size_t offset = blockIdx.x * blockDim.x + threadIdx.x;
if (offset >= max) return;
out[start[0] + offset][start[1] + offset] = 1;
}
template<typename T>
/*static*/ void EyeTask<T>::gpu_variant(const Task* task, const std::vector<PhysicalRegion>& regions, Context ctx,
Runtime* runtime) {
LegateDeserializer derez(task->args, task->arglen);
// We know this is 2-D
const Rect<2> rect = NumPyProjectionFunctor::unpack_shape<2>(task, derez);
const AccessorRW<T, 2> out = derez.unpack_accessor_RW<T, 2>(regions[0], rect);
const int k = derez.unpack_32bit_int();
// Solve for the start
// y = x + k
// x >= rect.lo[0]
const Point<2> start1(rect.lo[0], rect.lo[0] + k);
// y >= rect.lo[1]
const Point<2> start2(rect.lo[1] - k, rect.lo[1]);
// If we don't have a start point then there's nothing for us to do
if (!rect.contains(start1) && !rect.contains(start2)) return;
// Pick whichever one fits in our rect
const Point<2> start = rect.contains(start1) ? start1 : start2;
// Now do the same thing for the end
// x <= rect.hi[0]
const Point<2> stop1(rect.hi[0], rect.hi[0] + k);
// y <= rect.hi[1]
const Point<2> stop2(rect.hi[1] - k, rect.hi[1]);
assert(rect.contains(stop1) || rect.contains(stop2));
const Point<2> stop = rect.contains(stop1) ? stop1 : stop2;
// Walk the path from the stop to the start
const coord_t distance = (stop[0] - start[0]) + 1;
// Should be the same along both dimensions
assert(distance == ((stop[1] - start[1]) + 1));
const size_t blocks = (distance + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
hipLaunchKernelGGL(( legate_eye<T>), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, out, start, distance);
}
INSTANTIATE_TASK_VARIANT(EyeTask, gpu_variant)
} // namespace numpy
} // namespace legate
| 3ad4adba56d3fbb883bca9e548741b11f780a19b.cu | /* Copyright 2021 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include "cuda_help.h"
#include "eye.h"
#include "proj.h"
using namespace Legion;
namespace legate {
namespace numpy {
template<typename T>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
legate_eye(const AccessorRW<T, 2> out, const Point<2> start, const size_t max) {
const size_t offset = blockIdx.x * blockDim.x + threadIdx.x;
if (offset >= max) return;
out[start[0] + offset][start[1] + offset] = 1;
}
template<typename T>
/*static*/ void EyeTask<T>::gpu_variant(const Task* task, const std::vector<PhysicalRegion>& regions, Context ctx,
Runtime* runtime) {
LegateDeserializer derez(task->args, task->arglen);
// We know this is 2-D
const Rect<2> rect = NumPyProjectionFunctor::unpack_shape<2>(task, derez);
const AccessorRW<T, 2> out = derez.unpack_accessor_RW<T, 2>(regions[0], rect);
const int k = derez.unpack_32bit_int();
// Solve for the start
// y = x + k
// x >= rect.lo[0]
const Point<2> start1(rect.lo[0], rect.lo[0] + k);
// y >= rect.lo[1]
const Point<2> start2(rect.lo[1] - k, rect.lo[1]);
// If we don't have a start point then there's nothing for us to do
if (!rect.contains(start1) && !rect.contains(start2)) return;
// Pick whichever one fits in our rect
const Point<2> start = rect.contains(start1) ? start1 : start2;
// Now do the same thing for the end
// x <= rect.hi[0]
const Point<2> stop1(rect.hi[0], rect.hi[0] + k);
// y <= rect.hi[1]
const Point<2> stop2(rect.hi[1] - k, rect.hi[1]);
assert(rect.contains(stop1) || rect.contains(stop2));
const Point<2> stop = rect.contains(stop1) ? stop1 : stop2;
// Walk the path from the stop to the start
const coord_t distance = (stop[0] - start[0]) + 1;
// Should be the same along both dimensions
assert(distance == ((stop[1] - start[1]) + 1));
const size_t blocks = (distance + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
legate_eye<T><<<blocks, THREADS_PER_BLOCK>>>(out, start, distance);
}
INSTANTIATE_TASK_VARIANT(EyeTask, gpu_variant)
} // namespace numpy
} // namespace legate
|
3ca4c91ff7f657ee44429d26738e48e9211e326c.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "dataToTex.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
uint *indices = NULL;
hipMalloc(&indices, XSIZE*YSIZE);
float4 *g_odata = NULL;
hipMalloc(&g_odata, XSIZE*YSIZE);
int imgw = 1;
int imgh = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
dataToTex), dim3(gridBlock),dim3(threadBlock), 0, 0, indices,g_odata,imgw,imgh);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
dataToTex), dim3(gridBlock),dim3(threadBlock), 0, 0, indices,g_odata,imgw,imgh);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
dataToTex), dim3(gridBlock),dim3(threadBlock), 0, 0, indices,g_odata,imgw,imgh);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 3ca4c91ff7f657ee44429d26738e48e9211e326c.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "dataToTex.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
uint *indices = NULL;
cudaMalloc(&indices, XSIZE*YSIZE);
float4 *g_odata = NULL;
cudaMalloc(&g_odata, XSIZE*YSIZE);
int imgw = 1;
int imgh = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
dataToTex<<<gridBlock,threadBlock>>>(indices,g_odata,imgw,imgh);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
dataToTex<<<gridBlock,threadBlock>>>(indices,g_odata,imgw,imgh);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
dataToTex<<<gridBlock,threadBlock>>>(indices,g_odata,imgw,imgh);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
59c480d3624397b3443149543710fc33b974ec67.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__ void loop()
{
/*
* This idiomatic expression gives each thread
* a unique index within the entire grid.
*/
int i = blockIdx.x * blockDim.x + threadIdx.x;
printf("%d\n", i);
}
int main()
{
/*
* Additional execution configurations that would
   * work and meet the exercise's constraints are:
*
* <<<5, 2>>>
* <<<10, 1>>>
*/
hipLaunchKernelGGL(( loop), dim3(2), dim3(5), 0, 0, );
hipDeviceSynchronize();
} | 59c480d3624397b3443149543710fc33b974ec67.cu | #include <stdio.h>
__global__ void loop()
{
/*
* This idiomatic expression gives each thread
* a unique index within the entire grid.
*/
int i = blockIdx.x * blockDim.x + threadIdx.x;
printf("%d\n", i);
}
int main()
{
/*
* Additional execution configurations that would
   * work and meet the exercise's constraints are:
*
* <<<5, 2>>>
* <<<10, 1>>>
*/
loop<<<2, 5>>>();
cudaDeviceSynchronize();
} |
a0f3a7083e96408edc923eaa8d61aa32d50aa725.hip | // !!! This is a file automatically generated by hipify!!!
// *************************************************************************
//
// PARALUTION www.paralution.com
//
// Copyright (C) 2012-2014 Dimitar Lukarski
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//
// *************************************************************************
// PARALUTION version 0.7.0
#include "gpu_matrix_csr.hpp"
#include "gpu_matrix_coo.hpp"
#include "gpu_matrix_dia.hpp"
#include "gpu_matrix_ell.hpp"
#include "gpu_matrix_hyb.hpp"
#include "gpu_matrix_mcsr.hpp"
#include "gpu_matrix_bcsr.hpp"
#include "gpu_matrix_dense.hpp"
#include "gpu_vector.hpp"
#include "../host/host_matrix_dia.hpp"
#include "../base_matrix.hpp"
#include "../base_vector.hpp"
#include "../backend_manager.hpp"
#include "../../utils/log.hpp"
#include "../../utils/allocate_free.hpp"
#include "gpu_utils.hpp"
#include "cuda_kernels_general.hpp"
#include "cuda_kernels_dia.hpp"
#include "cuda_kernels_vector.hpp"
#include "gpu_allocate_free.hpp"
#include "../matrix_formats_ind.hpp"
#include <assert.h>
#include <hip/hip_runtime.h>
#include <cusparse_v2.h>
namespace paralution {
template <typename ValueType>
GPUAcceleratorMatrixDIA<ValueType>::GPUAcceleratorMatrixDIA() {
// no default constructors
LOG_INFO("no default constructor");
FATAL_ERROR(__FILE__, __LINE__);
}
template <typename ValueType>
GPUAcceleratorMatrixDIA<ValueType>::GPUAcceleratorMatrixDIA(const Paralution_Backend_Descriptor local_backend) {
LOG_DEBUG(this, "GPUAcceleratorMatrixDIA::GPUAcceleratorMatrixDIA()",
"constructor with local_backend");
this->mat_.val = NULL;
this->mat_.offset = NULL;
this->mat_.num_diag = 0 ;
this->set_backend(local_backend);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
template <typename ValueType>
GPUAcceleratorMatrixDIA<ValueType>::~GPUAcceleratorMatrixDIA() {
LOG_DEBUG(this, "GPUAcceleratorMatrixDIA::GPUAcceleratorMatrixDIA()",
"destructor");
this->Clear();
}
template <typename ValueType>
void GPUAcceleratorMatrixDIA<ValueType>::info(void) const {
LOG_INFO("GPUAcceleratorMatrixDIA<ValueType> diag=" << this->get_ndiag() << " nnz=" << this->get_nnz() );
}
template <typename ValueType>
void GPUAcceleratorMatrixDIA<ValueType>::AllocateDIA(const int nnz, const int nrow, const int ncol, const int ndiag) {
assert(nnz >= 0);
assert(ncol >= 0);
assert(nrow >= 0);
if (this->get_nnz() > 0)
this->Clear();
if (nnz > 0) {
assert(ndiag > 0);
allocate_gpu(nnz, &this->mat_.val);
allocate_gpu(ndiag, &this->mat_.offset);
set_to_zero_gpu(this->local_backend_.GPU_block_size,
this->local_backend_.GPU_max_threads,
nnz, mat_.val);
set_to_zero_gpu(this->local_backend_.GPU_block_size,
this->local_backend_.GPU_max_threads,
ndiag, mat_.offset);
this->nrow_ = nrow;
this->ncol_ = ncol;
this->nnz_ = nnz;
this->mat_.num_diag = ndiag;
}
}
template <typename ValueType>
void GPUAcceleratorMatrixDIA<ValueType>::Clear() {
if (this->get_nnz() > 0) {
free_gpu(&this->mat_.val);
free_gpu(&this->mat_.offset);
this->nrow_ = 0;
this->ncol_ = 0;
this->nnz_ = 0;
this->mat_.num_diag = 0 ;
}
}
template <typename ValueType>
void GPUAcceleratorMatrixDIA<ValueType>::CopyFromHost(const HostMatrix<ValueType> &src) {
const HostMatrixDIA<ValueType> *cast_mat;
// copy only in the same format
assert(this->get_mat_format() == src.get_mat_format());
// CPU to GPU copy
if ((cast_mat = dynamic_cast<const HostMatrixDIA<ValueType>*> (&src)) != NULL) {
if (this->get_nnz() == 0)
this->AllocateDIA(cast_mat->get_nnz(), cast_mat->get_nrow(), cast_mat->get_ncol(), cast_mat->get_ndiag());
assert(this->get_nnz() == src.get_nnz());
assert(this->get_nrow() == src.get_nrow());
assert(this->get_ncol() == src.get_ncol());
if (this->get_nnz() > 0) {
hipMemcpy(this->mat_.offset, // dst
cast_mat->mat_.offset, // src
this->get_ndiag()*sizeof(int), // size
hipMemcpyHostToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
hipMemcpy(this->mat_.val, // dst
cast_mat->mat_.val, // src
this->get_nnz()*sizeof(ValueType), // size
hipMemcpyHostToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
} else {
LOG_INFO("Error unsupported GPU matrix type");
this->info();
src.info();
FATAL_ERROR(__FILE__, __LINE__);
}
}
template <typename ValueType>
void GPUAcceleratorMatrixDIA<ValueType>::CopyToHost(HostMatrix<ValueType> *dst) const {
HostMatrixDIA<ValueType> *cast_mat;
// copy only in the same format
assert(this->get_mat_format() == dst->get_mat_format());
// GPU to CPU copy
if ((cast_mat = dynamic_cast<HostMatrixDIA<ValueType>*> (dst)) != NULL) {
cast_mat->set_backend(this->local_backend_);
if (dst->get_nnz() == 0)
cast_mat->AllocateDIA(this->get_nnz(), this->get_nrow(), this->get_ncol(), this->get_ndiag());
assert(this->get_nnz() == dst->get_nnz());
assert(this->get_nrow() == dst->get_nrow());
assert(this->get_ncol() == dst->get_ncol());
if (this->get_nnz() > 0) {
hipMemcpy(cast_mat->mat_.offset, // dst
this->mat_.offset, // src
this->get_ndiag()*sizeof(int), // size
hipMemcpyDeviceToHost);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
hipMemcpy(cast_mat->mat_.val, // dst
this->mat_.val, // src
this->get_nnz()*sizeof(ValueType), // size
hipMemcpyDeviceToHost);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
} else {
LOG_INFO("Error unsupported GPU matrix type");
this->info();
dst->info();
FATAL_ERROR(__FILE__, __LINE__);
}
}
template <typename ValueType>
void GPUAcceleratorMatrixDIA<ValueType>::CopyFrom(const BaseMatrix<ValueType> &src) {
const GPUAcceleratorMatrixDIA<ValueType> *gpu_cast_mat;
const HostMatrix<ValueType> *host_cast_mat;
// copy only in the same format
assert(this->get_mat_format() == src.get_mat_format());
// GPU to GPU copy
if ((gpu_cast_mat = dynamic_cast<const GPUAcceleratorMatrixDIA<ValueType>*> (&src)) != NULL) {
if (this->get_nnz() == 0)
this->AllocateDIA(gpu_cast_mat->get_nnz(), gpu_cast_mat->get_nrow(), gpu_cast_mat->get_ncol(), gpu_cast_mat->get_ndiag());
assert(this->get_nnz() == src.get_nnz());
assert(this->get_nrow() == src.get_nrow());
assert(this->get_ncol() == src.get_ncol());
if (this->get_nnz() > 0) {
hipMemcpy(this->mat_.offset, // dst
gpu_cast_mat->mat_.offset, // src
this->get_ndiag()*sizeof(int), // size
hipMemcpyDeviceToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
hipMemcpy(this->mat_.val, // dst
gpu_cast_mat->mat_.val, // src
this->get_nnz()*sizeof(ValueType), // size
hipMemcpyDeviceToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
} else {
//CPU to GPU
if ((host_cast_mat = dynamic_cast<const HostMatrix<ValueType>*> (&src)) != NULL) {
this->CopyFromHost(*host_cast_mat);
} else {
LOG_INFO("Error unsupported GPU matrix type");
this->info();
src.info();
FATAL_ERROR(__FILE__, __LINE__);
}
}
}
template <typename ValueType>
void GPUAcceleratorMatrixDIA<ValueType>::CopyTo(BaseMatrix<ValueType> *dst) const {
GPUAcceleratorMatrixDIA<ValueType> *gpu_cast_mat;
HostMatrix<ValueType> *host_cast_mat;
// copy only in the same format
assert(this->get_mat_format() == dst->get_mat_format());
// GPU to GPU copy
if ((gpu_cast_mat = dynamic_cast<GPUAcceleratorMatrixDIA<ValueType>*> (dst)) != NULL) {
gpu_cast_mat->set_backend(this->local_backend_);
if (this->get_nnz() == 0)
gpu_cast_mat->AllocateDIA(gpu_cast_mat->get_nnz(), gpu_cast_mat->get_nrow(), gpu_cast_mat->get_ncol(), gpu_cast_mat->get_ndiag());
assert(this->get_nnz() == dst->get_nnz());
assert(this->get_nrow() == dst->get_nrow());
assert(this->get_ncol() == dst->get_ncol());
if (this->get_nnz() > 0) {
hipMemcpy(gpu_cast_mat->mat_.offset, // dst
this->mat_.offset, // src
this->get_ndiag()*sizeof(int), // size
hipMemcpyDeviceToHost);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
hipMemcpy(gpu_cast_mat->mat_.val, // dst
this->mat_.val, // src
this->get_nnz()*sizeof(ValueType), // size
hipMemcpyDeviceToHost);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
} else {
//GPU to CPU
if ((host_cast_mat = dynamic_cast<HostMatrix<ValueType>*> (dst)) != NULL) {
this->CopyToHost(host_cast_mat);
} else {
LOG_INFO("Error unsupported GPU matrix type");
this->info();
dst->info();
FATAL_ERROR(__FILE__, __LINE__);
}
}
}
template <typename ValueType>
void GPUAcceleratorMatrixDIA<ValueType>::CopyFromHostAsync(const HostMatrix<ValueType> &src) {
const HostMatrixDIA<ValueType> *cast_mat;
// copy only in the same format
assert(this->get_mat_format() == src.get_mat_format());
// CPU to GPU copy
if ((cast_mat = dynamic_cast<const HostMatrixDIA<ValueType>*> (&src)) != NULL) {
if (this->get_nnz() == 0)
this->AllocateDIA(cast_mat->get_nnz(), cast_mat->get_nrow(), cast_mat->get_ncol(), cast_mat->get_ndiag());
assert(this->get_nnz() == src.get_nnz());
assert(this->get_nrow() == src.get_nrow());
assert(this->get_ncol() == src.get_ncol());
if (this->get_nnz() > 0) {
hipMemcpyAsync(this->mat_.offset, // dst
cast_mat->mat_.offset, // src
this->get_ndiag()*sizeof(int), // size
hipMemcpyHostToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
hipMemcpyAsync(this->mat_.val, // dst
cast_mat->mat_.val, // src
this->get_nnz()*sizeof(ValueType), // size
hipMemcpyHostToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
} else {
LOG_INFO("Error unsupported GPU matrix type");
this->info();
src.info();
FATAL_ERROR(__FILE__, __LINE__);
}
}
template <typename ValueType>
void GPUAcceleratorMatrixDIA<ValueType>::CopyToHostAsync(HostMatrix<ValueType> *dst) const {
HostMatrixDIA<ValueType> *cast_mat;
// copy only in the same format
assert(this->get_mat_format() == dst->get_mat_format());
// GPU to CPU copy
if ((cast_mat = dynamic_cast<HostMatrixDIA<ValueType>*> (dst)) != NULL) {
cast_mat->set_backend(this->local_backend_);
if (dst->get_nnz() == 0)
cast_mat->AllocateDIA(this->get_nnz(), this->get_nrow(), this->get_ncol(), this->get_ndiag());
assert(this->get_nnz() == dst->get_nnz());
assert(this->get_nrow() == dst->get_nrow());
assert(this->get_ncol() == dst->get_ncol());
if (this->get_nnz() > 0) {
hipMemcpyAsync(cast_mat->mat_.offset, // dst
this->mat_.offset, // src
this->get_ndiag()*sizeof(int), // size
hipMemcpyDeviceToHost);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
hipMemcpyAsync(cast_mat->mat_.val, // dst
this->mat_.val, // src
this->get_nnz()*sizeof(ValueType), // size
hipMemcpyDeviceToHost);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
} else {
LOG_INFO("Error unsupported GPU matrix type");
this->info();
dst->info();
FATAL_ERROR(__FILE__, __LINE__);
}
}
template <typename ValueType>
void GPUAcceleratorMatrixDIA<ValueType>::CopyFromAsync(const BaseMatrix<ValueType> &src) {
const GPUAcceleratorMatrixDIA<ValueType> *gpu_cast_mat;
const HostMatrix<ValueType> *host_cast_mat;
// copy only in the same format
assert(this->get_mat_format() == src.get_mat_format());
// GPU to GPU copy
if ((gpu_cast_mat = dynamic_cast<const GPUAcceleratorMatrixDIA<ValueType>*> (&src)) != NULL) {
if (this->get_nnz() == 0)
this->AllocateDIA(gpu_cast_mat->get_nnz(), gpu_cast_mat->get_nrow(), gpu_cast_mat->get_ncol(), gpu_cast_mat->get_ndiag());
assert(this->get_nnz() == src.get_nnz());
assert(this->get_nrow() == src.get_nrow());
assert(this->get_ncol() == src.get_ncol());
if (this->get_nnz() > 0) {
hipMemcpy(this->mat_.offset, // dst
gpu_cast_mat->mat_.offset, // src
this->get_ndiag()*sizeof(int), // size
hipMemcpyDeviceToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
hipMemcpy(this->mat_.val, // dst
gpu_cast_mat->mat_.val, // src
this->get_nnz()*sizeof(ValueType), // size
hipMemcpyDeviceToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
} else {
//CPU to GPU
if ((host_cast_mat = dynamic_cast<const HostMatrix<ValueType>*> (&src)) != NULL) {
this->CopyFromHostAsync(*host_cast_mat);
} else {
LOG_INFO("Error unsupported GPU matrix type");
this->info();
src.info();
FATAL_ERROR(__FILE__, __LINE__);
}
}
}
template <typename ValueType>
void GPUAcceleratorMatrixDIA<ValueType>::CopyToAsync(BaseMatrix<ValueType> *dst) const {
GPUAcceleratorMatrixDIA<ValueType> *gpu_cast_mat;
HostMatrix<ValueType> *host_cast_mat;
// copy only in the same format
assert(this->get_mat_format() == dst->get_mat_format());
// GPU to GPU copy
if ((gpu_cast_mat = dynamic_cast<GPUAcceleratorMatrixDIA<ValueType>*> (dst)) != NULL) {
gpu_cast_mat->set_backend(this->local_backend_);
if (this->get_nnz() == 0)
gpu_cast_mat->AllocateDIA(gpu_cast_mat->get_nnz(), gpu_cast_mat->get_nrow(), gpu_cast_mat->get_ncol(), gpu_cast_mat->get_ndiag());
assert(this->get_nnz() == dst->get_nnz());
assert(this->get_nrow() == dst->get_nrow());
assert(this->get_ncol() == dst->get_ncol());
if (this->get_nnz() > 0) {
hipMemcpy(gpu_cast_mat->mat_.offset, // dst
this->mat_.offset, // src
this->get_ndiag()*sizeof(int), // size
hipMemcpyDeviceToHost);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
hipMemcpy(gpu_cast_mat->mat_.val, // dst
this->mat_.val, // src
this->get_nnz()*sizeof(ValueType), // size
hipMemcpyDeviceToHost);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
} else {
//GPU to CPU
if ((host_cast_mat = dynamic_cast<HostMatrix<ValueType>*> (dst)) != NULL) {
this->CopyToHostAsync(host_cast_mat);
} else {
LOG_INFO("Error unsupported GPU matrix type");
this->info();
dst->info();
FATAL_ERROR(__FILE__, __LINE__);
}
}
}
template <typename ValueType>
bool GPUAcceleratorMatrixDIA<ValueType>::ConvertFrom(const BaseMatrix<ValueType> &mat) {
this->Clear();
// empty matrix is empty matrix
if (mat.get_nnz() == 0)
return true;
const GPUAcceleratorMatrixDIA<ValueType> *cast_mat_dia;
if ((cast_mat_dia = dynamic_cast<const GPUAcceleratorMatrixDIA<ValueType>*> (&mat)) != NULL) {
this->CopyFrom(*cast_mat_dia);
return true;
}
const GPUAcceleratorMatrixCSR<ValueType> *cast_mat_csr;
if ((cast_mat_csr = dynamic_cast<const GPUAcceleratorMatrixCSR<ValueType>*> (&mat)) != NULL) {
this->Clear();
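    // CSR -> DIA conversion outline: mark the occupied diagonals of the CSR
    // matrix (kernel_dia_diag_map), reduce the map to count them, turn the map
    // into diagonal offsets via a prefix sum (currently done on the host), and
    // finally scatter the CSR values into the DIA value array (kernel_dia_convert).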
// TODO
// upper bound (somehow fixed for now)
//
// GROUP_SIZE = ( size_t( ( size_t( nrow+ncol / ( this->local_backend_.GPU_wrap * 4 ) ) + 1 )
// / this->local_backend_.GPU_block_size ) + 1 ) * this->local_backend_.GPU_block_size;
//
if (cast_mat_csr->get_nrow()+cast_mat_csr->get_ncol() > 16842494*4)
return false;
int nrow = cast_mat_csr->get_nrow();
int ncol = cast_mat_csr->get_ncol();
int *diag_map = NULL;
// Get diagonal mapping vector
allocate_gpu<int>(nrow+ncol, &diag_map);
set_to_zero_gpu(this->local_backend_.GPU_block_size,
this->local_backend_.GPU_max_threads,
nrow+ncol, diag_map);
dim3 BlockSize(this->local_backend_.GPU_block_size);
dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1);
hipLaunchKernelGGL(( kernel_dia_diag_map<int>) , dim3(GridSize), dim3(BlockSize), 0, 0, nrow, cast_mat_csr->mat_.row_offset,
cast_mat_csr->mat_.col, diag_map);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
// Reduction to obtain number of occupied diagonals
int *d_buffer = NULL;
int *h_buffer = NULL;
int GROUP_SIZE;
int LOCAL_SIZE;
int FinalReduceSize;
allocate_gpu<int>(this->local_backend_.GPU_wrap * 4, &d_buffer);
dim3 GridSize2(this->local_backend_.GPU_wrap * 4);
GROUP_SIZE = ( size_t( ( size_t( nrow+ncol / ( this->local_backend_.GPU_wrap * 4 ) ) + 1 )
/ this->local_backend_.GPU_block_size ) + 1 ) * this->local_backend_.GPU_block_size;
LOCAL_SIZE = GROUP_SIZE / this->local_backend_.GPU_block_size;
hipLaunchKernelGGL(( kernel_reduce<int, int, 256>) , dim3(GridSize2), dim3(BlockSize), 0, 0, nrow+ncol, diag_map, d_buffer, GROUP_SIZE, LOCAL_SIZE);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
FinalReduceSize = this->local_backend_.GPU_wrap * 4;
allocate_host(FinalReduceSize, &h_buffer);
hipMemcpy(h_buffer, // dst
d_buffer, // src
FinalReduceSize*sizeof(int), // size
hipMemcpyDeviceToHost);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
free_gpu<int>(&d_buffer);
int num_diag = 0;
for ( int i=0; i<FinalReduceSize; ++i )
num_diag += h_buffer[i];
free_host(&h_buffer);
if (num_diag > 200) {
LOG_INFO("Error: CSR to DIA conversion - too many diagonal elements");
LOG_INFO("Number of diagonals = " << num_diag);
FATAL_ERROR(__FILE__, __LINE__);
}
int nnz_dia;
if (nrow < ncol)
nnz_dia = ncol * num_diag;
else
nnz_dia = nrow * num_diag;
// Allocate DIA structure
this->AllocateDIA(nnz_dia, nrow, ncol, num_diag);
set_to_zero_gpu(this->local_backend_.GPU_block_size,
this->local_backend_.GPU_max_threads,
nnz_dia, this->mat_.val);
set_to_zero_gpu(this->local_backend_.GPU_block_size,
this->local_backend_.GPU_max_threads,
num_diag, this->mat_.offset);
// Fill diagonal offset array
allocate_gpu<int>(nrow+ncol+1, &d_buffer);
// TODO currently performing partial sum on host
allocate_host(nrow+ncol+1, &h_buffer);
hipMemcpy(h_buffer+1, // dst
diag_map, // src
(nrow+ncol)*sizeof(int), // size
hipMemcpyDeviceToHost);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
h_buffer[0] = 0;
for (int i=2; i<nrow+ncol+1; ++i)
h_buffer[i] += h_buffer[i-1];
hipMemcpy(d_buffer, // dst
h_buffer, // src
(nrow+ncol)*sizeof(int), // size
hipMemcpyHostToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
free_host(&h_buffer);
// end TODO
// TODO
// fix the numbers (not hardcoded)
//
if (cast_mat_csr->get_nrow()+cast_mat_csr->get_ncol() > 16842494) {
// Large systems
// 2D indexing
int d2_bs = 16;
int gsize1 = 65535;
int gsize2 = ((nrow+ncol)/(65535*d2_bs))/d2_bs + 1;
dim3 GridSize3(gsize1,
gsize2);
dim3 BlockSize3(d2_bs,
d2_bs);
hipLaunchKernelGGL(( kernel_dia_fill_offset<int>) , dim3(GridSize3), dim3(BlockSize3), 0, 0, nrow, ncol, diag_map,
d_buffer, this->mat_.offset);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
} else {
// Small systems
// 1D indexing
dim3 GridSize3((nrow+ncol) / this->local_backend_.GPU_block_size + 1);
hipLaunchKernelGGL(( kernel_dia_fill_offset<int>) , dim3(GridSize3), dim3(BlockSize), 0, 0, nrow, ncol, diag_map,
d_buffer, this->mat_.offset);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
free_gpu<int>(&d_buffer);
hipLaunchKernelGGL(( kernel_dia_convert<ValueType, int>) , dim3(GridSize), dim3(BlockSize), 0, 0, nrow, num_diag, cast_mat_csr->mat_.row_offset,
cast_mat_csr->mat_.col, cast_mat_csr->mat_.val,
diag_map, this->mat_.val);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
free_gpu<int>(&diag_map);
this->nrow_ = cast_mat_csr->get_nrow();
this->ncol_ = cast_mat_csr->get_ncol();
this->nnz_ = nnz_dia;
this->mat_.num_diag = num_diag;
return true;
}
return false;
}
template <typename ValueType>
void GPUAcceleratorMatrixDIA<ValueType>::Apply(const BaseVector<ValueType> &in, BaseVector<ValueType> *out) const {
if (this->get_nnz() > 0) {
assert(in. get_size() >= 0);
assert(out->get_size() >= 0);
assert(in. get_size() == this->get_ncol());
assert(out->get_size() == this->get_nrow());
const GPUAcceleratorVector<ValueType> *cast_in = dynamic_cast<const GPUAcceleratorVector<ValueType>*> (&in) ;
GPUAcceleratorVector<ValueType> *cast_out = dynamic_cast< GPUAcceleratorVector<ValueType>*> (out) ;
assert(cast_in != NULL);
assert(cast_out!= NULL);
int nrow = this->get_nrow();
int ncol = this->get_ncol();
int num_diag = this->get_ndiag();
dim3 BlockSize(this->local_backend_.GPU_block_size);
dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1);
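    // One thread per matrix row; kernel_dia_spmv computes out = A * in from the
    // num_diag stored diagonals.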
hipLaunchKernelGGL(( kernel_dia_spmv<ValueType, int>) , dim3(GridSize), dim3(BlockSize), 0, 0, nrow, ncol, num_diag,
this->mat_.offset, this->mat_.val,
cast_in->vec_, cast_out->vec_ );
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
}
template <typename ValueType>
void GPUAcceleratorMatrixDIA<ValueType>::ApplyAdd(const BaseVector<ValueType> &in, const ValueType scalar,
BaseVector<ValueType> *out) const {
if (this->get_nnz() > 0) {
assert(in. get_size() >= 0);
assert(out->get_size() >= 0);
assert(in. get_size() == this->get_ncol());
assert(out->get_size() == this->get_nrow());
const GPUAcceleratorVector<ValueType> *cast_in = dynamic_cast<const GPUAcceleratorVector<ValueType>*> (&in) ;
GPUAcceleratorVector<ValueType> *cast_out = dynamic_cast< GPUAcceleratorVector<ValueType>*> (out) ;
assert(cast_in != NULL);
assert(cast_out!= NULL);
int nrow = this->get_nrow();
int ncol = this->get_ncol();
int num_diag = this->get_ndiag();
dim3 BlockSize(this->local_backend_.GPU_block_size);
dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1);
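    // Same launch shape as Apply(); judging by its name and arguments, kernel_dia_add_spmv
    // accumulates the scaled product into the output (out += scalar * A * in).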
hipLaunchKernelGGL(( kernel_dia_add_spmv<ValueType, int>) , dim3(GridSize), dim3(BlockSize), 0, 0, nrow, ncol, num_diag,
this->mat_.offset, this->mat_.val,
scalar,
cast_in->vec_, cast_out->vec_ );
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
}
template class GPUAcceleratorMatrixDIA<double>;
template class GPUAcceleratorMatrixDIA<float>;
}
| a0f3a7083e96408edc923eaa8d61aa32d50aa725.cu | // *************************************************************************
//
// PARALUTION www.paralution.com
//
// Copyright (C) 2012-2014 Dimitar Lukarski
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//
// *************************************************************************
// PARALUTION version 0.7.0
#include "gpu_matrix_csr.hpp"
#include "gpu_matrix_coo.hpp"
#include "gpu_matrix_dia.hpp"
#include "gpu_matrix_ell.hpp"
#include "gpu_matrix_hyb.hpp"
#include "gpu_matrix_mcsr.hpp"
#include "gpu_matrix_bcsr.hpp"
#include "gpu_matrix_dense.hpp"
#include "gpu_vector.hpp"
#include "../host/host_matrix_dia.hpp"
#include "../base_matrix.hpp"
#include "../base_vector.hpp"
#include "../backend_manager.hpp"
#include "../../utils/log.hpp"
#include "../../utils/allocate_free.hpp"
#include "gpu_utils.hpp"
#include "cuda_kernels_general.hpp"
#include "cuda_kernels_dia.hpp"
#include "cuda_kernels_vector.hpp"
#include "gpu_allocate_free.hpp"
#include "../matrix_formats_ind.hpp"
#include <assert.h>
#include <cuda.h>
#include <cusparse_v2.h>
namespace paralution {
template <typename ValueType>
GPUAcceleratorMatrixDIA<ValueType>::GPUAcceleratorMatrixDIA() {
// no default constructors
LOG_INFO("no default constructor");
FATAL_ERROR(__FILE__, __LINE__);
}
template <typename ValueType>
GPUAcceleratorMatrixDIA<ValueType>::GPUAcceleratorMatrixDIA(const Paralution_Backend_Descriptor local_backend) {
LOG_DEBUG(this, "GPUAcceleratorMatrixDIA::GPUAcceleratorMatrixDIA()",
"constructor with local_backend");
this->mat_.val = NULL;
this->mat_.offset = NULL;
this->mat_.num_diag = 0 ;
this->set_backend(local_backend);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
template <typename ValueType>
GPUAcceleratorMatrixDIA<ValueType>::~GPUAcceleratorMatrixDIA() {
LOG_DEBUG(this, "GPUAcceleratorMatrixDIA::GPUAcceleratorMatrixDIA()",
"destructor");
this->Clear();
}
template <typename ValueType>
void GPUAcceleratorMatrixDIA<ValueType>::info(void) const {
LOG_INFO("GPUAcceleratorMatrixDIA<ValueType> diag=" << this->get_ndiag() << " nnz=" << this->get_nnz() );
}
template <typename ValueType>
void GPUAcceleratorMatrixDIA<ValueType>::AllocateDIA(const int nnz, const int nrow, const int ncol, const int ndiag) {
assert(nnz >= 0);
assert(ncol >= 0);
assert(nrow >= 0);
if (this->get_nnz() > 0)
this->Clear();
if (nnz > 0) {
assert(ndiag > 0);
allocate_gpu(nnz, &this->mat_.val);
allocate_gpu(ndiag, &this->mat_.offset);
set_to_zero_gpu(this->local_backend_.GPU_block_size,
this->local_backend_.GPU_max_threads,
nnz, mat_.val);
set_to_zero_gpu(this->local_backend_.GPU_block_size,
this->local_backend_.GPU_max_threads,
ndiag, mat_.offset);
this->nrow_ = nrow;
this->ncol_ = ncol;
this->nnz_ = nnz;
this->mat_.num_diag = ndiag;
}
}
template <typename ValueType>
void GPUAcceleratorMatrixDIA<ValueType>::Clear() {
if (this->get_nnz() > 0) {
free_gpu(&this->mat_.val);
free_gpu(&this->mat_.offset);
this->nrow_ = 0;
this->ncol_ = 0;
this->nnz_ = 0;
this->mat_.num_diag = 0 ;
}
}
template <typename ValueType>
void GPUAcceleratorMatrixDIA<ValueType>::CopyFromHost(const HostMatrix<ValueType> &src) {
const HostMatrixDIA<ValueType> *cast_mat;
// copy only in the same format
assert(this->get_mat_format() == src.get_mat_format());
// CPU to GPU copy
if ((cast_mat = dynamic_cast<const HostMatrixDIA<ValueType>*> (&src)) != NULL) {
if (this->get_nnz() == 0)
this->AllocateDIA(cast_mat->get_nnz(), cast_mat->get_nrow(), cast_mat->get_ncol(), cast_mat->get_ndiag());
assert(this->get_nnz() == src.get_nnz());
assert(this->get_nrow() == src.get_nrow());
assert(this->get_ncol() == src.get_ncol());
if (this->get_nnz() > 0) {
cudaMemcpy(this->mat_.offset, // dst
cast_mat->mat_.offset, // src
this->get_ndiag()*sizeof(int), // size
cudaMemcpyHostToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
cudaMemcpy(this->mat_.val, // dst
cast_mat->mat_.val, // src
this->get_nnz()*sizeof(ValueType), // size
cudaMemcpyHostToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
} else {
LOG_INFO("Error unsupported GPU matrix type");
this->info();
src.info();
FATAL_ERROR(__FILE__, __LINE__);
}
}
template <typename ValueType>
void GPUAcceleratorMatrixDIA<ValueType>::CopyToHost(HostMatrix<ValueType> *dst) const {
HostMatrixDIA<ValueType> *cast_mat;
// copy only in the same format
assert(this->get_mat_format() == dst->get_mat_format());
// GPU to CPU copy
if ((cast_mat = dynamic_cast<HostMatrixDIA<ValueType>*> (dst)) != NULL) {
cast_mat->set_backend(this->local_backend_);
if (dst->get_nnz() == 0)
cast_mat->AllocateDIA(this->get_nnz(), this->get_nrow(), this->get_ncol(), this->get_ndiag());
assert(this->get_nnz() == dst->get_nnz());
assert(this->get_nrow() == dst->get_nrow());
assert(this->get_ncol() == dst->get_ncol());
if (this->get_nnz() > 0) {
cudaMemcpy(cast_mat->mat_.offset, // dst
this->mat_.offset, // src
this->get_ndiag()*sizeof(int), // size
cudaMemcpyDeviceToHost);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
cudaMemcpy(cast_mat->mat_.val, // dst
this->mat_.val, // src
this->get_nnz()*sizeof(ValueType), // size
cudaMemcpyDeviceToHost);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
} else {
LOG_INFO("Error unsupported GPU matrix type");
this->info();
dst->info();
FATAL_ERROR(__FILE__, __LINE__);
}
}
template <typename ValueType>
void GPUAcceleratorMatrixDIA<ValueType>::CopyFrom(const BaseMatrix<ValueType> &src) {
const GPUAcceleratorMatrixDIA<ValueType> *gpu_cast_mat;
const HostMatrix<ValueType> *host_cast_mat;
// copy only in the same format
assert(this->get_mat_format() == src.get_mat_format());
// GPU to GPU copy
if ((gpu_cast_mat = dynamic_cast<const GPUAcceleratorMatrixDIA<ValueType>*> (&src)) != NULL) {
if (this->get_nnz() == 0)
this->AllocateDIA(gpu_cast_mat->get_nnz(), gpu_cast_mat->get_nrow(), gpu_cast_mat->get_ncol(), gpu_cast_mat->get_ndiag());
assert(this->get_nnz() == src.get_nnz());
assert(this->get_nrow() == src.get_nrow());
assert(this->get_ncol() == src.get_ncol());
if (this->get_nnz() > 0) {
cudaMemcpy(this->mat_.offset, // dst
gpu_cast_mat->mat_.offset, // src
this->get_ndiag()*sizeof(int), // size
cudaMemcpyDeviceToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
cudaMemcpy(this->mat_.val, // dst
gpu_cast_mat->mat_.val, // src
this->get_nnz()*sizeof(ValueType), // size
cudaMemcpyDeviceToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
} else {
//CPU to GPU
if ((host_cast_mat = dynamic_cast<const HostMatrix<ValueType>*> (&src)) != NULL) {
this->CopyFromHost(*host_cast_mat);
} else {
LOG_INFO("Error unsupported GPU matrix type");
this->info();
src.info();
FATAL_ERROR(__FILE__, __LINE__);
}
}
}
template <typename ValueType>
void GPUAcceleratorMatrixDIA<ValueType>::CopyTo(BaseMatrix<ValueType> *dst) const {
GPUAcceleratorMatrixDIA<ValueType> *gpu_cast_mat;
HostMatrix<ValueType> *host_cast_mat;
// copy only in the same format
assert(this->get_mat_format() == dst->get_mat_format());
// GPU to GPU copy
if ((gpu_cast_mat = dynamic_cast<GPUAcceleratorMatrixDIA<ValueType>*> (dst)) != NULL) {
gpu_cast_mat->set_backend(this->local_backend_);
if (this->get_nnz() == 0)
gpu_cast_mat->AllocateDIA(gpu_cast_mat->get_nnz(), gpu_cast_mat->get_nrow(), gpu_cast_mat->get_ncol(), gpu_cast_mat->get_ndiag());
assert(this->get_nnz() == dst->get_nnz());
assert(this->get_nrow() == dst->get_nrow());
assert(this->get_ncol() == dst->get_ncol());
if (this->get_nnz() > 0) {
cudaMemcpy(gpu_cast_mat->mat_.offset, // dst
this->mat_.offset, // src
this->get_ndiag()*sizeof(int), // size
cudaMemcpyDeviceToHost);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
cudaMemcpy(gpu_cast_mat->mat_.val, // dst
this->mat_.val, // src
this->get_nnz()*sizeof(ValueType), // size
cudaMemcpyDeviceToHost);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
} else {
//GPU to CPU
if ((host_cast_mat = dynamic_cast<HostMatrix<ValueType>*> (dst)) != NULL) {
this->CopyToHost(host_cast_mat);
} else {
LOG_INFO("Error unsupported GPU matrix type");
this->info();
dst->info();
FATAL_ERROR(__FILE__, __LINE__);
}
}
}
template <typename ValueType>
void GPUAcceleratorMatrixDIA<ValueType>::CopyFromHostAsync(const HostMatrix<ValueType> &src) {
const HostMatrixDIA<ValueType> *cast_mat;
// copy only in the same format
assert(this->get_mat_format() == src.get_mat_format());
// CPU to GPU copy
if ((cast_mat = dynamic_cast<const HostMatrixDIA<ValueType>*> (&src)) != NULL) {
if (this->get_nnz() == 0)
this->AllocateDIA(cast_mat->get_nnz(), cast_mat->get_nrow(), cast_mat->get_ncol(), cast_mat->get_ndiag());
assert(this->get_nnz() == src.get_nnz());
assert(this->get_nrow() == src.get_nrow());
assert(this->get_ncol() == src.get_ncol());
if (this->get_nnz() > 0) {
cudaMemcpyAsync(this->mat_.offset, // dst
cast_mat->mat_.offset, // src
this->get_ndiag()*sizeof(int), // size
cudaMemcpyHostToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
cudaMemcpyAsync(this->mat_.val, // dst
cast_mat->mat_.val, // src
this->get_nnz()*sizeof(ValueType), // size
cudaMemcpyHostToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
} else {
LOG_INFO("Error unsupported GPU matrix type");
this->info();
src.info();
FATAL_ERROR(__FILE__, __LINE__);
}
}
template <typename ValueType>
void GPUAcceleratorMatrixDIA<ValueType>::CopyToHostAsync(HostMatrix<ValueType> *dst) const {
HostMatrixDIA<ValueType> *cast_mat;
// copy only in the same format
assert(this->get_mat_format() == dst->get_mat_format());
// GPU to CPU copy
if ((cast_mat = dynamic_cast<HostMatrixDIA<ValueType>*> (dst)) != NULL) {
cast_mat->set_backend(this->local_backend_);
if (dst->get_nnz() == 0)
cast_mat->AllocateDIA(this->get_nnz(), this->get_nrow(), this->get_ncol(), this->get_ndiag());
assert(this->get_nnz() == dst->get_nnz());
assert(this->get_nrow() == dst->get_nrow());
assert(this->get_ncol() == dst->get_ncol());
if (this->get_nnz() > 0) {
cudaMemcpyAsync(cast_mat->mat_.offset, // dst
this->mat_.offset, // src
this->get_ndiag()*sizeof(int), // size
cudaMemcpyDeviceToHost);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
cudaMemcpyAsync(cast_mat->mat_.val, // dst
this->mat_.val, // src
this->get_nnz()*sizeof(ValueType), // size
cudaMemcpyDeviceToHost);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
} else {
LOG_INFO("Error unsupported GPU matrix type");
this->info();
dst->info();
FATAL_ERROR(__FILE__, __LINE__);
}
}
template <typename ValueType>
void GPUAcceleratorMatrixDIA<ValueType>::CopyFromAsync(const BaseMatrix<ValueType> &src) {
const GPUAcceleratorMatrixDIA<ValueType> *gpu_cast_mat;
const HostMatrix<ValueType> *host_cast_mat;
// copy only in the same format
assert(this->get_mat_format() == src.get_mat_format());
// GPU to GPU copy
if ((gpu_cast_mat = dynamic_cast<const GPUAcceleratorMatrixDIA<ValueType>*> (&src)) != NULL) {
if (this->get_nnz() == 0)
this->AllocateDIA(gpu_cast_mat->get_nnz(), gpu_cast_mat->get_nrow(), gpu_cast_mat->get_ncol(), gpu_cast_mat->get_ndiag());
assert(this->get_nnz() == src.get_nnz());
assert(this->get_nrow() == src.get_nrow());
assert(this->get_ncol() == src.get_ncol());
if (this->get_nnz() > 0) {
cudaMemcpy(this->mat_.offset, // dst
gpu_cast_mat->mat_.offset, // src
this->get_ndiag()*sizeof(int), // size
cudaMemcpyDeviceToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
cudaMemcpy(this->mat_.val, // dst
gpu_cast_mat->mat_.val, // src
this->get_nnz()*sizeof(ValueType), // size
cudaMemcpyDeviceToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
} else {
//CPU to GPU
if ((host_cast_mat = dynamic_cast<const HostMatrix<ValueType>*> (&src)) != NULL) {
this->CopyFromHostAsync(*host_cast_mat);
} else {
LOG_INFO("Error unsupported GPU matrix type");
this->info();
src.info();
FATAL_ERROR(__FILE__, __LINE__);
}
}
}
template <typename ValueType>
void GPUAcceleratorMatrixDIA<ValueType>::CopyToAsync(BaseMatrix<ValueType> *dst) const {
GPUAcceleratorMatrixDIA<ValueType> *gpu_cast_mat;
HostMatrix<ValueType> *host_cast_mat;
// copy only in the same format
assert(this->get_mat_format() == dst->get_mat_format());
// GPU to GPU copy
if ((gpu_cast_mat = dynamic_cast<GPUAcceleratorMatrixDIA<ValueType>*> (dst)) != NULL) {
gpu_cast_mat->set_backend(this->local_backend_);
if (this->get_nnz() == 0)
gpu_cast_mat->AllocateDIA(gpu_cast_mat->get_nnz(), gpu_cast_mat->get_nrow(), gpu_cast_mat->get_ncol(), gpu_cast_mat->get_ndiag());
assert(this->get_nnz() == dst->get_nnz());
assert(this->get_nrow() == dst->get_nrow());
assert(this->get_ncol() == dst->get_ncol());
if (this->get_nnz() > 0) {
cudaMemcpy(gpu_cast_mat->mat_.offset, // dst
this->mat_.offset, // src
this->get_ndiag()*sizeof(int), // size
cudaMemcpyDeviceToHost);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
cudaMemcpy(gpu_cast_mat->mat_.val, // dst
this->mat_.val, // src
this->get_nnz()*sizeof(ValueType), // size
cudaMemcpyDeviceToHost);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
} else {
//GPU to CPU
if ((host_cast_mat = dynamic_cast<HostMatrix<ValueType>*> (dst)) != NULL) {
this->CopyToHostAsync(host_cast_mat);
} else {
LOG_INFO("Error unsupported GPU matrix type");
this->info();
dst->info();
FATAL_ERROR(__FILE__, __LINE__);
}
}
}
template <typename ValueType>
bool GPUAcceleratorMatrixDIA<ValueType>::ConvertFrom(const BaseMatrix<ValueType> &mat) {
this->Clear();
// empty matrix is empty matrix
if (mat.get_nnz() == 0)
return true;
const GPUAcceleratorMatrixDIA<ValueType> *cast_mat_dia;
if ((cast_mat_dia = dynamic_cast<const GPUAcceleratorMatrixDIA<ValueType>*> (&mat)) != NULL) {
this->CopyFrom(*cast_mat_dia);
return true;
}
const GPUAcceleratorMatrixCSR<ValueType> *cast_mat_csr;
if ((cast_mat_csr = dynamic_cast<const GPUAcceleratorMatrixCSR<ValueType>*> (&mat)) != NULL) {
this->Clear();
// TODO
// upper bound (somehow fixed for now)
//
// GROUP_SIZE = ( size_t( ( size_t( nrow+ncol / ( this->local_backend_.GPU_wrap * 4 ) ) + 1 )
// / this->local_backend_.GPU_block_size ) + 1 ) * this->local_backend_.GPU_block_size;
//
if (cast_mat_csr->get_nrow()+cast_mat_csr->get_ncol() > 16842494*4)
return false;
int nrow = cast_mat_csr->get_nrow();
int ncol = cast_mat_csr->get_ncol();
int *diag_map = NULL;
// Get diagonal mapping vector
allocate_gpu<int>(nrow+ncol, &diag_map);
set_to_zero_gpu(this->local_backend_.GPU_block_size,
this->local_backend_.GPU_max_threads,
nrow+ncol, diag_map);
dim3 BlockSize(this->local_backend_.GPU_block_size);
dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1);
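    // kernel_dia_diag_map scans the CSR structure and flags, in diag_map (nrow+ncol slots),
    // every diagonal that holds at least one nonzero.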
kernel_dia_diag_map<int> <<<GridSize, BlockSize>>> (nrow, cast_mat_csr->mat_.row_offset,
cast_mat_csr->mat_.col, diag_map);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
// Reduction to obtain number of occupied diagonals
int *d_buffer = NULL;
int *h_buffer = NULL;
int GROUP_SIZE;
int LOCAL_SIZE;
int FinalReduceSize;
allocate_gpu<int>(this->local_backend_.GPU_wrap * 4, &d_buffer);
dim3 GridSize2(this->local_backend_.GPU_wrap * 4);
GROUP_SIZE = ( size_t( ( size_t( nrow+ncol / ( this->local_backend_.GPU_wrap * 4 ) ) + 1 )
/ this->local_backend_.GPU_block_size ) + 1 ) * this->local_backend_.GPU_block_size;
LOCAL_SIZE = GROUP_SIZE / this->local_backend_.GPU_block_size;
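    // Each of the GPU_wrap*4 blocks appears to reduce one GROUP_SIZE-long slice of diag_map,
    // with every thread responsible for LOCAL_SIZE of its entries before the in-block reduction.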
kernel_reduce<int, int, 256> <<<GridSize2, BlockSize>>> (nrow+ncol, diag_map, d_buffer, GROUP_SIZE, LOCAL_SIZE);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
FinalReduceSize = this->local_backend_.GPU_wrap * 4;
allocate_host(FinalReduceSize, &h_buffer);
cudaMemcpy(h_buffer, // dst
d_buffer, // src
FinalReduceSize*sizeof(int), // size
cudaMemcpyDeviceToHost);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
free_gpu<int>(&d_buffer);
int num_diag = 0;
for ( int i=0; i<FinalReduceSize; ++i )
num_diag += h_buffer[i];
free_host(&h_buffer);
if (num_diag > 200) {
LOG_INFO("Error: CSR to DIA conversion - too many diagonal elements");
LOG_INFO("Number of diagonals = " << num_diag);
FATAL_ERROR(__FILE__, __LINE__);
}
int nnz_dia;
if (nrow < ncol)
nnz_dia = ncol * num_diag;
else
nnz_dia = nrow * num_diag;
// Allocate DIA structure
this->AllocateDIA(nnz_dia, nrow, ncol, num_diag);
set_to_zero_gpu(this->local_backend_.GPU_block_size,
this->local_backend_.GPU_max_threads,
nnz_dia, this->mat_.val);
set_to_zero_gpu(this->local_backend_.GPU_block_size,
this->local_backend_.GPU_max_threads,
num_diag, this->mat_.offset);
// Fill diagonal offset array
allocate_gpu<int>(nrow+ncol+1, &d_buffer);
// TODO currently performing partial sum on host
allocate_host(nrow+ncol+1, &h_buffer);
cudaMemcpy(h_buffer+1, // dst
diag_map, // src
(nrow+ncol)*sizeof(int), // size
cudaMemcpyDeviceToHost);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
h_buffer[0] = 0;
for (int i=2; i<nrow+ncol+1; ++i)
h_buffer[i] += h_buffer[i-1];
cudaMemcpy(d_buffer, // dst
h_buffer, // src
(nrow+ncol)*sizeof(int), // size
cudaMemcpyHostToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
free_host(&h_buffer);
// end TODO
// TODO
// fix the numbers (not hardcoded)
//
if (cast_mat_csr->get_nrow()+cast_mat_csr->get_ncol() > 16842494) {
// Large systems
// 2D indexing
int d2_bs = 16;
int gsize1 = 65535;
int gsize2 = ((nrow+ncol)/(65535*d2_bs))/d2_bs + 1;
dim3 GridSize3(gsize1,
gsize2);
dim3 BlockSize3(d2_bs,
d2_bs);
kernel_dia_fill_offset<int> <<<GridSize3, BlockSize3>>> (nrow, ncol, diag_map,
d_buffer, this->mat_.offset);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
} else {
// Small systems
// 1D indexing
dim3 GridSize3((nrow+ncol) / this->local_backend_.GPU_block_size + 1);
kernel_dia_fill_offset<int> <<<GridSize3, BlockSize>>> (nrow, ncol, diag_map,
d_buffer, this->mat_.offset);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
free_gpu<int>(&d_buffer);
kernel_dia_convert<ValueType, int> <<<GridSize, BlockSize>>> (nrow, num_diag, cast_mat_csr->mat_.row_offset,
cast_mat_csr->mat_.col, cast_mat_csr->mat_.val,
diag_map, this->mat_.val);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
free_gpu<int>(&diag_map);
this->nrow_ = cast_mat_csr->get_nrow();
this->ncol_ = cast_mat_csr->get_ncol();
this->nnz_ = nnz_dia;
this->mat_.num_diag = num_diag;
return true;
}
return false;
}
template <typename ValueType>
void GPUAcceleratorMatrixDIA<ValueType>::Apply(const BaseVector<ValueType> &in, BaseVector<ValueType> *out) const {
if (this->get_nnz() > 0) {
assert(in. get_size() >= 0);
assert(out->get_size() >= 0);
assert(in. get_size() == this->get_ncol());
assert(out->get_size() == this->get_nrow());
const GPUAcceleratorVector<ValueType> *cast_in = dynamic_cast<const GPUAcceleratorVector<ValueType>*> (&in) ;
GPUAcceleratorVector<ValueType> *cast_out = dynamic_cast< GPUAcceleratorVector<ValueType>*> (out) ;
assert(cast_in != NULL);
assert(cast_out!= NULL);
int nrow = this->get_nrow();
int ncol = this->get_ncol();
int num_diag = this->get_ndiag();
dim3 BlockSize(this->local_backend_.GPU_block_size);
dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1);
kernel_dia_spmv<ValueType, int> <<<GridSize, BlockSize>>> (nrow, ncol, num_diag,
this->mat_.offset, this->mat_.val,
cast_in->vec_, cast_out->vec_ );
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
}
template <typename ValueType>
void GPUAcceleratorMatrixDIA<ValueType>::ApplyAdd(const BaseVector<ValueType> &in, const ValueType scalar,
BaseVector<ValueType> *out) const {
if (this->get_nnz() > 0) {
assert(in. get_size() >= 0);
assert(out->get_size() >= 0);
assert(in. get_size() == this->get_ncol());
assert(out->get_size() == this->get_nrow());
const GPUAcceleratorVector<ValueType> *cast_in = dynamic_cast<const GPUAcceleratorVector<ValueType>*> (&in) ;
GPUAcceleratorVector<ValueType> *cast_out = dynamic_cast< GPUAcceleratorVector<ValueType>*> (out) ;
assert(cast_in != NULL);
assert(cast_out!= NULL);
int nrow = this->get_nrow();
int ncol = this->get_ncol();
int num_diag = this->get_ndiag();
dim3 BlockSize(this->local_backend_.GPU_block_size);
dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1);
kernel_dia_add_spmv<ValueType, int> <<<GridSize, BlockSize>>> (nrow, ncol, num_diag,
this->mat_.offset, this->mat_.val,
scalar,
cast_in->vec_, cast_out->vec_ );
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
}
template class GPUAcceleratorMatrixDIA<double>;
template class GPUAcceleratorMatrixDIA<float>;
}
|
89e909935a76967efbe7b198304ab9d784856876.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <time.h>
#include <iostream>
#include <fstream>
#include <string>
#include <vector>
__global__ void printMatrix(float **d_matrix, int size) {
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
int j = (blockIdx.y * blockDim.y) + threadIdx.y;
if (i < size && i >= 0) {
if (j < size && j >=0) {
printf("i is %d, j is %d, %f \n", i, j, d_matrix[i][j]);
}
}
}
__global__ void changeFirstElementToOne(float **d_matrix, float **d_inversion, int pivot, int size, float firstElement) {
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
int j = (blockIdx.y * blockDim.y) + threadIdx.y;
if (i == pivot) {
if (j >= 0 && j < size) {
d_matrix[i][j] = d_matrix[i][j] / firstElement;
d_inversion[i][j] = d_inversion[i][j] / firstElement;
}
}
}
__global__ void GJKernel(float **d_matrix, float **d_inversion, int pivot, int size) {
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
int j = (blockIdx.y * blockDim.y) + threadIdx.y;
if (i >= 0 && i < size && i != pivot && j < size && j >= 0) {
if (j != pivot) {
d_matrix[i][j] = d_matrix[i][j] - d_matrix[i][pivot] * d_matrix[pivot][j];
}
d_inversion[i][j] = d_inversion[i][j] - d_matrix[i][pivot] * d_inversion[pivot][j];
}
}
__global__ void setPivotColumnToZero(float **d_matrix, int pivot, int size) {
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
int j = (blockIdx.y * blockDim.y) + threadIdx.y;
if (i >= 0 && i < size && i != pivot) {
if (j == pivot) {
d_matrix[i][j] = 0.0;
}
}
}
int main(void) {
// read in data
std::ifstream file_("test100.txt");
if (!file_) {
std::cout << "Cannot open file.\n";
return 0;
}
int size; // size of the matrix
file_ >> size;
float **matrix; // matrix to inverse
matrix = new float*[size];
for (int i = 0; i < size; i++) {
matrix[i] = new float[size];
}
for (int i = 0; i < size; i++) {
for (int j = 0; j < size; j++) {
file_ >> matrix[i][j];
}
}
// initialize variable
float **inversion, **d_inversion; // result
float **d_inversion_h;
d_inversion_h = (float**)malloc(size * sizeof(float *));
float **d_matrix;
float **d_matrix_h;
d_matrix_h = (float**)malloc(size * sizeof(float *));
// alloc space for device copies
hipMalloc((void **)&d_inversion, size * sizeof(float*));
hipMalloc((void **)&d_matrix, size * sizeof(float*));
// alloc space for host copies
inversion = (float**)malloc(size * sizeof(float *));
// initial inversion
for (int i = 0; i < size; i++) {
inversion[i] = (float*)malloc(size * sizeof(float));
}
for (int i = 0; i < size; i++) {
for (int j = 0; j < size; j++) {
if (i == j) inversion[i][j] = 1.0;
else inversion[i][j] = 0.0;
}
}
// copy from host to device
for (int i = 0; i < size; i++) {
hipMalloc((void**)&(d_matrix_h[i]), size * sizeof(float));
hipMemcpy(d_matrix_h[i], matrix[i], size * sizeof(float), hipMemcpyHostToDevice);
}
hipMemcpy(d_matrix, d_matrix_h, size * sizeof(float*), hipMemcpyHostToDevice);
for (int i = 0; i < size; i++) {
hipMalloc((void**)&(d_inversion_h[i]), size * sizeof(float));
hipMemcpy(d_inversion_h[i], inversion[i], size * sizeof(float), hipMemcpyHostToDevice);
}
hipMemcpy(d_inversion, d_inversion_h, size * sizeof(float*), hipMemcpyHostToDevice);
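// d_matrix and d_inversion are device arrays of device row pointers (a pointer table);
// the host keeps the d_matrix_h / d_inversion_h shadow copies so it can fill these tables
// and later address individual rows directly (e.g. when reading the pivot element).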
// threadsPerBlock, numBlocks
dim3 threadsPerBlock(8, 8);
dim3 numBlocks((size - 1 + threadsPerBlock.x)/threadsPerBlock.x, (size - 1 + threadsPerBlock.y)/threadsPerBlock.y);
struct timespec cudalustart = {0,0}; // timing of the GPU Gauss-Jordan elimination (includes copying the result back to the host)
struct timespec cudaluend = {0,0};
clock_gettime(CLOCK_REALTIME,&cudalustart);
// Gauss-Jordan
for (int i = 0; i < size; i++) {
// change first element of the pivot line to 1
float firstElement;
hipMemcpy(&firstElement, &d_matrix_h[i][i], sizeof(float), hipMemcpyDeviceToHost);
hipLaunchKernelGGL(( changeFirstElementToOne), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, d_matrix, d_inversion, i, size, firstElement);
hipLaunchKernelGGL(( GJKernel), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, d_matrix, d_inversion, i, size);
hipLaunchKernelGGL(( setPivotColumnToZero), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, d_matrix, i, size);
}
// copy result from d_inversion to inversion
for (int i = 0; i < size; i++) {
hipMemcpy(inversion[i], d_inversion_h[i], size * sizeof(float), hipMemcpyDeviceToHost);
}
clock_gettime(CLOCK_REALTIME,&cudaluend);
std::cout<<"The time is "<<(cudaluend.tv_sec-cudalustart.tv_sec)*1000+(cudaluend.tv_nsec-cudalustart.tv_nsec)/1000000<<"ms\n";
/*
// print the result
for (int i = 0; i < size; i++) {
for (int j = 0; j < size; j++) {
std::cout << inversion[i][j] << " ";
}
std::cout << std::endl;
}
*/
// clean up: matrix rows were allocated with new[] and inversion rows with malloc;
// the per-row device buffers also need to be released
for (int i = 0; i < size; i++) { delete[] matrix[i]; free(inversion[i]); }
for (int i = 0; i < size; i++) { hipFree(d_matrix_h[i]); hipFree(d_inversion_h[i]); }
delete[] matrix; free(inversion); free(d_matrix_h); free(d_inversion_h);
hipFree(d_matrix); hipFree(d_inversion);
return 0;
} | 89e909935a76967efbe7b198304ab9d784856876.cu | #include <cuda.h>
#include <stdio.h>
#include <time.h>
#include <iostream>
#include <fstream>
#include <string>
#include <vector>
__global__ void printMatrix(float **d_matrix, int size) {
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
int j = (blockIdx.y * blockDim.y) + threadIdx.y;
if (i < size && i >= 0) {
if (j < size && j >=0) {
printf("i is %d, j is %d, %f \n", i, j, d_matrix[i][j]);
}
}
}
__global__ void changeFirstElementToOne(float **d_matrix, float **d_inversion, int pivot, int size, float firstElement) {
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
int j = (blockIdx.y * blockDim.y) + threadIdx.y;
if (i == pivot) {
if (j >= 0 && j < size) {
d_matrix[i][j] = d_matrix[i][j] / firstElement;
d_inversion[i][j] = d_inversion[i][j] / firstElement;
}
}
}
__global__ void GJKernel(float **d_matrix, float **d_inversion, int pivot, int size) {
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
int j = (blockIdx.y * blockDim.y) + threadIdx.y;
if (i >= 0 && i < size && i != pivot && j < size && j >= 0) {
if (j != pivot) {
d_matrix[i][j] = d_matrix[i][j] - d_matrix[i][pivot] * d_matrix[pivot][j];
}
d_inversion[i][j] = d_inversion[i][j] - d_matrix[i][pivot] * d_inversion[pivot][j];
}
}
__global__ void setPivotColumnToZero(float **d_matrix, int pivot, int size) {
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
int j = (blockIdx.y * blockDim.y) + threadIdx.y;
if (i >= 0 && i < size && i != pivot) {
if (j == pivot) {
d_matrix[i][j] = 0.0;
}
}
}
int main(void) {
// read in data
std::ifstream file_("test100.txt");
if (!file_) {
std::cout << "Cannot open file.\n";
return 0;
}
int size; // size of the matrix
file_ >> size;
float **matrix; // matrix to inverse
matrix = new float*[size];
for (int i = 0; i < size; i++) {
matrix[i] = new float[size];
}
for (int i = 0; i < size; i++) {
for (int j = 0; j < size; j++) {
file_ >> matrix[i][j];
}
}
// initialize variable
float **inversion, **d_inversion; // result
float **d_inversion_h;
d_inversion_h = (float**)malloc(size * sizeof(float *));
float **d_matrix;
float **d_matrix_h;
d_matrix_h = (float**)malloc(size * sizeof(float *));
// alloc space for device copies
cudaMalloc((void **)&d_inversion, size * sizeof(float*));
cudaMalloc((void **)&d_matrix, size * sizeof(float*));
// alloc space for host copies
inversion = (float**)malloc(size * sizeof(float *));
// initial inversion
for (int i = 0; i < size; i++) {
inversion[i] = (float*)malloc(size * sizeof(float));
}
for (int i = 0; i < size; i++) {
for (int j = 0; j < size; j++) {
if (i == j) inversion[i][j] = 1.0;
else inversion[i][j] = 0.0;
}
}
// copy from host to device
for (int i = 0; i < size; i++) {
cudaMalloc((void**)&(d_matrix_h[i]), size * sizeof(float));
cudaMemcpy(d_matrix_h[i], matrix[i], size * sizeof(float), cudaMemcpyHostToDevice);
}
cudaMemcpy(d_matrix, d_matrix_h, size * sizeof(float*), cudaMemcpyHostToDevice);
for (int i = 0; i < size; i++) {
cudaMalloc((void**)&(d_inversion_h[i]), size * sizeof(float));
cudaMemcpy(d_inversion_h[i], inversion[i], size * sizeof(float), cudaMemcpyHostToDevice);
}
cudaMemcpy(d_inversion, d_inversion_h, size * sizeof(float*), cudaMemcpyHostToDevice);
// threadsPerBlock, numBlocks
dim3 threadsPerBlock(8, 8);
dim3 numBlocks((size - 1 + threadsPerBlock.x)/threadsPerBlock.x, (size - 1 + threadsPerBlock.y)/threadsPerBlock.y);
struct timespec cudalustart = {0,0}; // timing of the GPU Gauss-Jordan elimination (includes copying the result back to the host)
struct timespec cudaluend = {0,0};
clock_gettime(CLOCK_REALTIME,&cudalustart);
// Gauss-Jordan
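// Note: this is Gauss-Jordan elimination without pivoting, so every pivot d_matrix[i][i]
// is assumed to be nonzero; a zero (or tiny) pivot would produce inf/NaN in the result.
// Each step normalizes the pivot row, eliminates the pivot column from all other rows,
// and then explicitly zeroes that column.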
for (int i = 0; i < size; i++) {
// change first element of the pivot line to 1
float firstElement;
cudaMemcpy(&firstElement, &d_matrix_h[i][i], sizeof(float), cudaMemcpyDeviceToHost);
changeFirstElementToOne<<<numBlocks, threadsPerBlock>>>(d_matrix, d_inversion, i, size, firstElement);
GJKernel<<<numBlocks, threadsPerBlock>>>(d_matrix, d_inversion, i, size);
setPivotColumnToZero<<<numBlocks, threadsPerBlock>>>(d_matrix, i, size);
}
// copy result from d_inversion to inversion
for (int i = 0; i < size; i++) {
cudaMemcpy(inversion[i], d_inversion_h[i], size * sizeof(float), cudaMemcpyDeviceToHost);
}
clock_gettime(CLOCK_REALTIME,&cudaluend);
std::cout<<"The time is "<<(cudaluend.tv_sec-cudalustart.tv_sec)*1000+(cudaluend.tv_nsec-cudalustart.tv_nsec)/1000000<<"ms\n";
/*
// print the result
for (int i = 0; i < size; i++) {
for (int j = 0; j < size; j++) {
std::cout << inversion[i][j] << " ";
}
std::cout << std::endl;
}
*/
// clean up: matrix rows were allocated with new[] and inversion rows with malloc;
// the per-row device buffers also need to be released
for (int i = 0; i < size; i++) { delete[] matrix[i]; free(inversion[i]); }
for (int i = 0; i < size; i++) { cudaFree(d_matrix_h[i]); cudaFree(d_inversion_h[i]); }
delete[] matrix; free(inversion); free(d_matrix_h); free(d_inversion_h);
cudaFree(d_matrix); cudaFree(d_inversion);
return 0;
} |
811fe2f08f59b8546c4255ae8a51f55de252ac30.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
*
* surf1Dmemset.cu
*
* Microdemo to illustrate 1D memset via surface store.
*
* Build with: nvcc --gpu-architecture sm_20 -I ../chLib <options> surf1Dmemset.cu
* Requires: SM 2.x for surface load/store.
*
* Copyright (c) 2011-2012, Archaea Software, LLC.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <stdio.h>
#include <chError.h>
#define NUM_VALUES 16
surface<void, 1> surf1D;
template <typename T>
__global__ void
surf1Dmemset_kernel( T value, int offset, size_t N )
{
for ( size_t i = blockIdx.x*blockDim.x + threadIdx.x;
i < N;
i += blockDim.x*gridDim.x )
{
surf1Dwrite( value, surf1D, (offset+i)*sizeof(T) );
}
}
template<typename T>
hipError_t
surf1Dmemset( hipArray *array, T value, int offset, size_t N )
{
hipError_t status;
cuda(BindSurfaceToArray(surf1D, array));
hipLaunchKernelGGL(( surf1Dmemset_kernel), dim3(2),dim3(384), 0, 0, value, offset, N*sizeof(T) );
Error:
return status;
}
int
main( int argc, char *argv[] )
{
int ret = 1;
float *foutHost = 0;
hipError_t status;
hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>();
hipArray *array = 0;
hipDeviceProp_t prop;
cuda(GetDeviceProperties(&prop, 0));
if ( prop.major < 2 ) {
printf( "This application requires SM 2.x (for surface load/store)\n" );
goto Error;
}
cuda(HostAlloc(
(void **) &foutHost,
NUM_VALUES*sizeof(float),
hipHostMallocMapped));
cuda(MallocArray(
&array,
&channelDesc,
NUM_VALUES*sizeof(float),
1,
hipArraySurfaceLoadStore ) );
CUDART_CHECK(surf1Dmemset( array, 3.141592654f, 0, NUM_VALUES ));
cuda(MemcpyFromArray(
foutHost,
array,
0,
0,
NUM_VALUES*sizeof(float),
hipMemcpyDeviceToHost ));
printf( "Surface contents (int form):\n" );
for ( int i = 0; i < NUM_VALUES; i++ ) {
printf( "%08x ", *(int *) (&foutHost[i]) );
}
printf( "\nSurface contents (int form):\n" );
for ( int i = 0; i < NUM_VALUES; i++ ) {
printf( "%E ", foutHost[i] );
}
printf( "\n" );
ret = 0;
Error:
hipHostFree( foutHost );
return ret;
}
| 811fe2f08f59b8546c4255ae8a51f55de252ac30.cu | /*
*
* surf1Dmemset.cu
*
* Microdemo to illustrate 1D memset via surface store.
*
* Build with: nvcc --gpu-architecture sm_20 -I ../chLib <options> surf1Dmemset.cu
* Requires: SM 2.x for surface load/store.
*
* Copyright (c) 2011-2012, Archaea Software, LLC.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <stdio.h>
#include <chError.h>
#define NUM_VALUES 16
surface<void, 1> surf1D;
template <typename T>
__global__ void
surf1Dmemset_kernel( T value, int offset, size_t N )
{
for ( size_t i = blockIdx.x*blockDim.x + threadIdx.x;
i < N;
i += blockDim.x*gridDim.x )
{
surf1Dwrite( value, surf1D, (offset+i)*sizeof(T) );
}
}
template<typename T>
cudaError_t
surf1Dmemset( cudaArray *array, T value, int offset, size_t N )
{
cudaError_t status;
cuda(BindSurfaceToArray(surf1D, array));
surf1Dmemset_kernel<<<2,384>>>( value, offset, N*sizeof(T) );
Error:
return status;
}
int
main( int argc, char *argv[] )
{
int ret = 1;
float *foutHost = 0;
cudaError_t status;
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>();
cudaArray *array = 0;
cudaDeviceProp prop;
cuda(GetDeviceProperties(&prop, 0));
if ( prop.major < 2 ) {
printf( "This application requires SM 2.x (for surface load/store)\n" );
goto Error;
}
cuda(HostAlloc(
(void **) &foutHost,
NUM_VALUES*sizeof(float),
cudaHostAllocMapped));
cuda(MallocArray(
&array,
&channelDesc,
NUM_VALUES*sizeof(float),
1,
cudaArraySurfaceLoadStore ) );
CUDART_CHECK(surf1Dmemset( array, 3.141592654f, 0, NUM_VALUES ));
cuda(MemcpyFromArray(
foutHost,
array,
0,
0,
NUM_VALUES*sizeof(float),
cudaMemcpyDeviceToHost ));
printf( "Surface contents (int form):\n" );
for ( int i = 0; i < NUM_VALUES; i++ ) {
printf( "%08x ", *(int *) (&foutHost[i]) );
}
printf( "\nSurface contents (int form):\n" );
for ( int i = 0; i < NUM_VALUES; i++ ) {
printf( "%E ", foutHost[i] );
}
printf( "\n" );
ret = 0;
Error:
cudaFreeHost( foutHost );
return ret;
}
|
b975c1610edd7251b4bc572bd8f672f8057265e7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <pairwise_transform.h>
__device__ float op(float d1,float d2,float *params) {
return d2 / d1;
}
__device__ float op(float d1,float *params) {
return d1;
}
extern "C"
__global__ void rdiv_strided_float(int n, int xOffset,int yOffset,float *dx, float *dy,int incx,int incy,float *params,float *result) {
transform(n,xOffset,yOffset,dx,dy,incx,incy,params,result);
}
| b975c1610edd7251b4bc572bd8f672f8057265e7.cu | #include <pairwise_transform.h>
__device__ float op(float d1,float d2,float *params) {
return d2 / d1;
}
__device__ float op(float d1,float *params) {
return d1;
}
extern "C"
__global__ void rdiv_strided_float(int n, int xOffset,int yOffset,float *dx, float *dy,int incx,int incy,float *params,float *result) {
transform(n,xOffset,yOffset,dx,dy,incx,incy,params,result);
}
|
69d373c7cdcae8a3376fe32a9b6a439fed25fdd1.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <cutil_inline.h>
bool InitCUDA(void)
{
int count = 0;
int i = 0;
hipGetDeviceCount(&count);
if(count == 0) {
fprintf(stderr, "There is no device.\n");
return false;
}
for(i = 0; i < count; i++) {
hipDeviceProp_t prop;
if(hipGetDeviceProperties(&prop, i) == hipSuccess) {
if(prop.major >= 1) {
break;
}
}
}
if(i == count) {
fprintf(stderr, "There is no device supporting CUDA.\n");
return false;
}
hipSetDevice(i);
printf("CUDA initialized.\n");
return true;
}
| 69d373c7cdcae8a3376fe32a9b6a439fed25fdd1.cu | #include <stdio.h>
#include <stdlib.h>
#include <cutil_inline.h>
bool InitCUDA(void)
{
int count = 0;
int i = 0;
cudaGetDeviceCount(&count);
if(count == 0) {
fprintf(stderr, "There is no device.\n");
return false;
}
for(i = 0; i < count; i++) {
cudaDeviceProp prop;
if(cudaGetDeviceProperties(&prop, i) == cudaSuccess) {
if(prop.major >= 1) {
break;
}
}
}
if(i == count) {
fprintf(stderr, "There is no device supporting CUDA.\n");
return false;
}
cudaSetDevice(i);
printf("CUDA initialized.\n");
return true;
}
|
825ee48fe4fe0f1b02c66023d174cfa34e943754.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "testMultiGPU_Jacobi.cuh"
#include <iostream>
#include <chrono>
#include <memory>
#include <vector>
using namespace std;
using namespace std::chrono;
#define IMUL(a,b) __mul24(a,b)
//hipError_t performMultiGPUJacobi();
//Support for below c++14 on *nix
template<typename T, typename ...Args>
std::unique_ptr<T> make_unique(Args&& ...args)
{
return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
}
struct create_DeviceHalos
{
int deviceID;
vector<float> eHalo;
vector<float> wHalo;
vector<float> nHalo;
vector<float> sHalo;
};
//Simple Jacobi iteration
__global__ void jacobi_Simple(const float *A0, const float *A1, const float *A2, const float *A3, const float *A4, float *x_in, float *x_out, const float *rhs, float *nhalo, float *shalo, const int deviceID, const int numDevices)
{
int index = threadIdx.x + blockDim.x * blockIdx.x;
float result = rhs[index];
int dim_x = blockDim.x;// dim across x
int dim_y = gridDim.x;
int x_pos = blockIdx.x;
int y_pos = threadIdx.x;
//result = nhalo[y_pos];
//x_out[index] = result;
//Get the boundaries
int leftBoundaryElem = x_pos * (dim_x);
int rightBoundaryElem = (x_pos * dim_x) + (dim_x - 1);
int topBoundaryElem = y_pos + ((dim_y - 1) * (dim_x));
int bottomBoundaryElem = y_pos;
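	//Within this device's chunk: leftBoundaryElem/rightBoundaryElem are the first and last
	//element of this thread's row (x_pos), while bottomBoundaryElem/topBoundaryElem are the
	//elements sharing this thread's column (y_pos) in the chunk's first and last row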
	//Halo computation for 1D decomposition: the first GPU needs only its north halo, the last GPU only its south halo, and every middle GPU needs both
if (numDevices>1)
{
//First GPU
if (deviceID == 0) {
//We need to use nhalos
//Carry out computations for boundary elements
if (index != leftBoundaryElem)
//Left
result -= A1[index] * x_in[index - 1];
if (index != rightBoundaryElem)
//Right
result -= A3[index] * x_in[index + 1];
if (index != bottomBoundaryElem)
//Bottom
result -= A0[index] * x_in[index - dim_x];
if (index != topBoundaryElem)
//Top
result -= A4[index] * x_in[index + dim_x];
//The top boundary needs element from nhalo
if (index == topBoundaryElem)
//nHalos
result -= A4[index] * nhalo[y_pos];
result /= A2[index];
x_out[index] = result;
//Update Halo at the end of computation
if (index == topBoundaryElem)
//nHalos updated
nhalo[y_pos] = result;
return;
}
//Last GPU
else if (deviceID == (numDevices - 1)) {
//We need to use shalos
//Carry out computations for boundary elements
if (index != leftBoundaryElem)
//Left
result -= A1[index] * x_in[index - 1];
if (index != rightBoundaryElem)
//Right
result -= A3[index] * x_in[index + 1];
if (index != bottomBoundaryElem)
//Bottom
result -= A0[index] * x_in[index - dim_x];
//The Bottom boundary needs elements from shalo
if (index == bottomBoundaryElem)
				//sHalos
result -= A0[index] * shalo[y_pos];
if (index != topBoundaryElem)
//Top
result -= A4[index] * x_in[index + dim_x];
result /= A2[index];
x_out[index] = result;
//Update Halo at the end of computation
if (index == bottomBoundaryElem)
//sHalos updated
shalo[y_pos] = result;
return;
}
//For all the middle GPUs
else
{
//We need to use both shalos and nhalos
//Carry out computations for boundary elements
if (index != leftBoundaryElem)
//Left
result -= A1[index] * x_in[index - 1];
if (index != rightBoundaryElem)
//Right
result -= A3[index] * x_in[index + 1];
if (index != bottomBoundaryElem)
//Bottom
result -= A0[index] * x_in[index - dim_x];
//The Bottom boundary needs elements from shalo
if (index == bottomBoundaryElem)
				//sHalos
result -= A0[index] * shalo[y_pos];
if (index != topBoundaryElem)
//Top
result -= A4[index] * x_in[index + dim_x];
//The top boundary needs element from nhalo
if (index == topBoundaryElem)
//nHalos
result -= A4[index] * nhalo[y_pos];
result /= A2[index];
x_out[index] = result;
//Update Halo at the end of computation
if (index == bottomBoundaryElem)
//sHalos updated
shalo[y_pos] = result;
//Update Halo at the end of computation
if (index == topBoundaryElem)
//nHalos updated
nhalo[y_pos] = result;
return;
}
}
//For computations on a Machine with a single GPU
else
{
		{//For some reason the order of computation (left, right, top and bottom) gives a different result
//Carry out computations for boundary elements
if (index != leftBoundaryElem)
//Left
result -= A1[index] * x_in[index - 1];
if (index != rightBoundaryElem)
//Right
result -= A3[index] * x_in[index + 1];
if (index != bottomBoundaryElem)
//Bottom
result -= A0[index] * x_in[index - dim_x];
if (index != topBoundaryElem)
//Top
result -= A4[index] * x_in[index + dim_x];
result /= A2[index];
x_out[index] = result;
return;
}
}
}
//Init Halos: In 1D decomposition only North and South halos are used. In 2D decomposition North, South, East and West halos need to be initialized and computed.
//In 3D decomposition North, South, East, West, Top and Bottom halos need to be initialized and computed.
void initHalos(int numDevices, vector<create_DeviceHalos> &deviceArray, int dim_x, float *vec_in) {
deviceArray.resize(numDevices);
int chunksize = ((dim_x*dim_x) / numDevices);
cout << "chunk size is :" << chunksize << endl;
for (int i = 0, pos = chunksize; i < numDevices; pos += chunksize, i++) {
deviceArray[i].deviceID = i;
deviceArray[i].nHalo.resize(dim_x);
//TODO: 2D halo exchange
//TODO: deviceArray[i].eHalo.resize(dim_x);
//TODO: deviceArray[i].wHalo.resize(dim_x);
deviceArray[i].sHalo.resize(dim_x);
if (numDevices == 1)
{
for (int count = 0; count<dim_x; count++)
{
deviceArray[i].nHalo[count] = 1.0f;
deviceArray[i].sHalo[count] = 1.0f;
}
return;
}
//First Device needs only nHalo
if (i == 0)
{
for (int k = pos, count = 0; count<dim_x; k++, count++)
{
cout << "Halo nPosition for first Device is : " << k << endl;
deviceArray[i].nHalo[count] = vec_in[k];
}
}
//Last device needs only sHalo
else if (i == (numDevices - 1))
{
for (int k = pos - (chunksize + dim_x), count = 0; count<dim_x; count++, k++)
{
cout << "Halo sPosition for Last Device is : " << k << endl;
deviceArray[i].sHalo[count] = vec_in[k];
}
}
//All the other devices need both sHalo and nHalo
else
{
for (int k = pos, count = 0; count<dim_x; count++, k++)
{
cout << "Halo nPosition for Mid Device " << i << " is : " << k << endl;
deviceArray[i].nHalo[count] = vec_in[k];
}
for (int k = pos - (chunksize + dim_x), count = 0; count<dim_x; count++, k++)
{
cout << "Halo sPosition for Mid Device " << i << " is : " << k << endl;
deviceArray[i].sHalo[count] = vec_in[k];
}
}
}
}
//Init matrix Diagonals A0, A1, A2, A3, A4
void copyValues(float *A0, float *A1, float *A2, float *A3, float *A4, float *rhs, float *vec_in, float *vec_out, int dim, float *val_A0, float *val_A1, float *val_A2, float *val_A3, float *val_A4, float *val_rhs, float *val_x_in)
{
unsigned int size = dim * dim;
for (unsigned int i = 0; i < size; i++)
{
A0[i] = val_A0[i];
A1[i] = val_A1[i];
A2[i] = val_A2[i];
A3[i] = val_A3[i];
A4[i] = val_A4[i];
rhs[i] = val_rhs[i];
vec_in[i] = val_x_in[i];
vec_out[i] = 0.0f;
}
}
void getAllDeviceProperties() {
int nDevices;
hipGetDeviceCount(&nDevices);
for (int i = 0; i < nDevices; i++) {
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, i);
cout<<" Device Number: " << i <<endl;
cout<<" Device name: "<<prop.name<<endl;
cout<<" Memory Clock Rate (KHz): "<<prop.memoryClockRate<<endl;
cout<<" Memory Bus Width (bits): "<<prop.memoryBusWidth << endl;;
cout<<" Peak Memory Bandwidth (GB/s): "<<2.0*prop.memoryClockRate*(prop.memoryBusWidth / 8) / 1.0e6<<endl<<endl<<endl;
}
}
hipError_t performMultiGPUJacobi(unsigned int val_dim, unsigned int numJacobiIt, float* val_A0, float* val_A1, float* val_A2, float* val_A3,float* val_A4, float* val_rhs, float* val_x_in)
{
//Fixed value changed later
int dim = 8;
if(val_dim!=0){
dim = val_dim;
}
//TODO: write a 2D domain decomposition method for more than 2 GPUs
int size = dim * dim;
//auto result = make_unique<float[]>(size);
//Create Diagonal Vectors
std::vector<float> a0(size);
std::vector<float> a1(size);
std::vector<float> a2(size);
std::vector<float> a3(size);
std::vector<float> a4(size);
std::vector<float> vec_in(size);
std::vector<float> vec_out(size);
std::vector<float> rhs(size);
std::vector<float> result(size);
//Used for exchanging the Halos after each Jacobi Iteration
std::vector<float> prev_nHalo(dim);
std::vector<float> curr_sHalo(dim);
//Get the total number of devices
int numDevices;
hipGetDeviceCount(&numDevices);
cout << endl << "Total number of Devices in the System are : " << numDevices << endl;
getAllDeviceProperties();
	//Configure the number of GPUs manually
//numDevices=1;
copyValues(&a0[0], &a1[0], &a2[0], &a3[0], &a4[0], &rhs[0], &vec_in[0], &vec_out[0], dim, &val_A0[0], &val_A1[0], &val_A2[0], &val_A3[0], &val_A4[0], &val_rhs[0], &val_x_in[0]);
vector<create_DeviceHalos> deviceArray;
	/* Distributed computation using halos: Algorithm
	1. Init halos.
	1.a) In 1D decomposition, nhalo and shalo are initialized from vector x_in
	1.b) In 2D decomposition, nhalo, shalo, ehalo and whalo are initialized from vector x_in
	2. Pass the halos to the Jacobi kernel.
	3. Store the result computed at the boundary into the halo boundary positions.
	4. Swap nhalo and shalo pairs in 1D decomposition. Swap (nhalo,shalo) and (ehalo,whalo) in 2D.
	*/
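	//Example (dim = 8, two GPUs, chunksize = 32): GPU 0 owns rows 0..3 and its north halo is
	//seeded from row 4 of x_in (elements 32..39); GPU 1 owns rows 4..7 and its south halo is
	//seeded from row 3 (elements 24..31). After every sweep the updated boundary rows are
	//exchanged between the two neighbours.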
initHalos(numDevices, deviceArray, dim, &vec_in[0]);
//Display Halos
if (numDevices>1) {
cout << endl << "Halo Init.." << endl;
for (int i = 0; i < numDevices; i++) {
cout << "Device ID: " << deviceArray[i].deviceID;
//First Device needs only nHalo
if (i == 0)
{
cout << "First Device";
for (int k = 0; k<dim; k++)
{
cout << deviceArray[i].nHalo[k];
}
}
//Last device needs only sHalo
else if (i == (numDevices - 1))
{
cout << "Last Device";
for (int k = 0; k<dim; k++)
{
cout << deviceArray[i].sHalo[k];
}
}
//All the other devices need both sHalo and nHalo
else
{
cout << "Middle Device";
for (int k = 0; k<dim; k++)
{
cout << deviceArray[i].nHalo[k];
}
for (int k = 0; k<dim; k++)
{
cout << deviceArray[i].sHalo[k];
}
}
cout << endl;
}
cout << endl;
cout << endl;
cout << endl;
}
cout << "A0 ....";
for (int i = 0; i < size; i++) {
cout << a0[i] << " ";
}
cout << endl;
cout << "A1 ....";
for (int i = 0; i < size; i++) {
cout << a1[i] << " ";
}
cout << endl;
cout << "A2 ....";
for (int i = 0; i < size; i++) {
cout << a2[i] << " ";
}
cout << endl;
cout << "A3 ....";
for (int i = 0; i < size; i++) {
cout << a3[i] << " ";
}
cout << endl;
cout << "A4 ....";
for (int i = 0; i < size; i++) {
cout << a4[i] << " ";
}
cout << endl;
cout << "RHS ....";
for (int i = 0; i < size; i++) {
cout << rhs[i] << " ";
}
cout << endl;
cout << "Vec In ...." << endl;
for (int i = size - 1; i >= 0; i--) {
if ((i + 1) % dim == 0) { cout << endl; }
cout << vec_in[i] << " ";
}
cout << endl;
cout << "Made it here..";
//Allocate memory on the devices
	//The computation is split evenly across the available GPUs: size/numDevices elements per device
	//(any remainder when size is not divisible by numDevices is still a TODO)
int *domainDivision;
domainDivision = new int[numDevices];
//Logic for total chunk per device (Domain distribution)
for (int i = 0; i < numDevices; i++) {
//if(!(i==numDevices-1)){
domainDivision[i] = size / numDevices;
//size = (size - size / numDevices);
//}
}
//For use on Device
float *d_A0[4],
*d_A1[4],
*d_A2[4],
*d_A3[4],
*d_A4[4],
*d_Vec_In[4],
*d_Vec_Out[4],
*d_Rhs[4],
*d_nhalos[4],
*d_shalos[4];
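	//NOTE: these fixed-size arrays support at most 4 GPUs; numDevices > 4 would index past them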
	/* The domain division is done in 1D, row-wise */
for (int dev = 0; dev<numDevices; dev++)
{
//Setting the device before allocation
hipSetDevice(dev);
		//Allocate the five diagonals on the device
hipMalloc((void**)&d_A0[dev], domainDivision[dev] * sizeof(float));
hipMalloc((void**)&d_A1[dev], domainDivision[dev] * sizeof(float));
hipMalloc((void**)&d_A2[dev], domainDivision[dev] * sizeof(float));
hipMalloc((void**)&d_A3[dev], domainDivision[dev] * sizeof(float));
hipMalloc((void**)&d_A4[dev], domainDivision[dev] * sizeof(float));
		//Allocate the input vector, result vector and RHS on the device
hipMalloc((void**)&d_Vec_In[dev], domainDivision[dev] * sizeof(float));
hipMalloc((void**)&d_Vec_Out[dev], domainDivision[dev] * sizeof(float));
hipMalloc((void**)&d_Rhs[dev], domainDivision[dev] * sizeof(float));
//hipMalloc Halos: North and South--1D. TODO: East and West for 2D
hipMalloc((void**)&d_nhalos[dev], dim * sizeof(float));
hipMalloc((void**)&d_shalos[dev], dim * sizeof(float));
}
/* The transfer of Data from Host to Device */
for (int dev = 0, pos = 0; dev<numDevices; pos += domainDivision[dev], dev++)
{
//Setting the device before allocation
hipSetDevice(dev);
//Copy the diagonals from host to device
hipMemcpy(d_A0[dev], &a0[0] + pos, domainDivision[dev] * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_A1[dev], &a1[0] + pos, domainDivision[dev] * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_A2[dev], &a2[0] + pos, domainDivision[dev] * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_A3[dev], &a3[0] + pos, domainDivision[dev] * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_A4[dev], &a4[0] + pos, domainDivision[dev] * sizeof(float), hipMemcpyHostToDevice);
//Copy in and out vectors and RHS
hipMemcpy(d_Vec_In[dev], &vec_in[0] + pos, domainDivision[dev] * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_Vec_Out[dev], &vec_out[0] + pos, domainDivision[dev] * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_Rhs[dev], &rhs[0] + pos, domainDivision[dev] * sizeof(float), hipMemcpyHostToDevice);
		//Copy initial halos in 1D. TODO: extend to more than 1D
if (dev == 0) {
hipMemcpy(d_nhalos[dev], &deviceArray[dev].nHalo[0], dim * sizeof(float), hipMemcpyHostToDevice);
}
else if (dev == (numDevices - 1)) {
hipMemcpy(d_shalos[dev], &deviceArray[dev].sHalo[0], dim * sizeof(float), hipMemcpyHostToDevice);
}
else {
hipMemcpy(d_nhalos[dev], &deviceArray[dev].nHalo[0], dim * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_shalos[dev], &deviceArray[dev].sHalo[0], dim * sizeof(float), hipMemcpyHostToDevice);
}
}
	//Check for errors from the device allocations and transfers above
	if (auto err = hipGetLastError())
	{
		cout << "Device memory setup failed: " << hipGetErrorString(err) << endl;
		return err;
	}
//multMatrix(d_A0, d_A1, d_A2, d_A3, d_A4, myDim, d_vec, d_res);
//Perform one Jacobi Step
	int blocksize = dim / numDevices; //number of blocks per device = rows owned by each device (TODO: make it work for more than 2 GPUs)
	int threads = dim;                //threads per block = one thread per column
	//Call to kernel
int iterations = 4;
if (numJacobiIt != 0) {
iterations = numJacobiIt;
}
for (int i = 0; i<iterations; i++)
{
//cout << endl << endl << "Iteration : " << i + 1 << endl << endl << endl;
//TODO: optimization using kernel instead of For Loop
for (int dev = 0, pos = 0; dev<numDevices; pos += domainDivision[dev], dev++)
{
hipSetDevice(dev);
/*cout << endl << endl << "Kernal Execution on GPU : " << dev;
cout << endl << "Position :" << pos;
cout << endl << "Check Intermediate Result before it gets passed to kernal" << endl;
hipMemcpy(&result[0] + pos, d_Vec_In[dev], domainDivision[dev] * sizeof(float), hipMemcpyDeviceToHost);
for (int i = size - 1; i >= 0; i--) {
if ((i + 1) % dim == 0) { cout << endl; }
cout << "#pos:" << i << " " << result[i] << " ";
}*/
jacobi_Simple << <blocksize, threads >> >(d_A0[dev], d_A1[dev], d_A2[dev], d_A3[dev], d_A4[dev], d_Vec_In[dev], d_Vec_Out[dev], d_Rhs[dev], d_nhalos[dev], d_shalos[dev], deviceArray[dev].deviceID, numDevices);
			//TODO: these copies are currently serial; they should use hipMemcpyAsync with streams
//Copy the intermediate result from Device to Host memory
hipMemcpy(&result[0] + pos, d_Vec_Out[dev], domainDivision[dev] * sizeof(float), hipMemcpyDeviceToHost);
//Copy the intermediate result from the Host memory to the Device memory
hipMemcpy(d_Vec_In[dev], &result[0] + pos, domainDivision[dev] * sizeof(float), hipMemcpyHostToDevice);
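			//The x_out -> x_in swap of classic Jacobi is done here as a round trip through host memory;
			//a device-to-device copy (or simply swapping the d_Vec_In/d_Vec_Out pointers) would avoid
			//crossing the PCIe bus every iteration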
/* Store Halo positions after iteration for exchanging */
if (numDevices>1)
{
if (dev == 0) {
hipMemcpy(&prev_nHalo[0], d_nhalos[dev], dim * sizeof(float), hipMemcpyDeviceToHost);
}
else if (dev == (numDevices - 1)) {
//Exchange Happens here
hipMemcpy(&curr_sHalo[0], d_shalos[dev], dim * sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(d_shalos[dev], &prev_nHalo[0], dim * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_nhalos[dev - 1], &curr_sHalo[0], dim * sizeof(float), hipMemcpyHostToDevice);
}
else {
//Exchange Happens here
hipMemcpy(&curr_sHalo[0], d_shalos[dev], dim * sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(d_shalos[dev], &prev_nHalo[0], dim * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_nhalos[dev - 1], &curr_sHalo[0], dim * sizeof(float), hipMemcpyHostToDevice);
//Store current North Boundary in prev_halo for exchanging in later step
hipMemcpy(&prev_nHalo[0], d_nhalos[dev], dim * sizeof(float), hipMemcpyDeviceToHost);
}
}
}
//TODO: Using P2P to be done later
//exchangeHalos(numDevices,result, d_Vec_In);
		//Exchange halo logic:
		//1. Prev = current nhalo
		//2. On the next iteration shalo = Prev, and Prev = nhalo.
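		//Concretely: device d's freshly computed bottom row (d_shalos[d]) becomes device d-1's
		//north halo, and device d-1's freshly computed top row (staged in prev_nHalo) becomes
		//device d's south halo, with the host vectors acting as staging buffers until the P2P
		//path mentioned above is implemented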
}
if (auto err = hipGetLastError())
{
cout << "Jacobi launch failed: " << hipGetErrorString(err) << endl;
return err;
}
cout << endl << "Iterations successful " << endl;
//Copy the final result from all devices
for (int dev = 0, pos = 0; dev < numDevices; pos += domainDivision[dev], dev++)
{
hipMemcpy(&result[0] + pos, d_Vec_Out[dev], domainDivision[dev] * sizeof(float), hipMemcpyDeviceToHost);
}
if (auto err = hipGetLastError())
{
cout << "Jacobi launch failed: " << hipGetErrorString(err) << endl;
return err;
}
//Print result
for (int i = size - 1; i >= 0; i--) {
if ((i + 1) % dim == 0) { cout << endl; }
cout << result[i] << " ";
}
// Freeing memory auto done by cuda deleter
//Free memory on devices
for (int dev = 0; dev<numDevices; dev++)
{
hipFree(d_A0[dev]);
hipFree(d_A1[dev]);
hipFree(d_A2[dev]);
hipFree(d_A3[dev]);
hipFree(d_A4[dev]);
hipFree(d_Vec_In[dev]);
hipFree(d_Vec_Out[dev]);
hipFree(d_nhalos[dev]);
hipFree(d_shalos[dev]);
hipFree(d_Rhs[dev]);
}
cout << endl << "Device Memory free successful.";
//Take care of dynamic mem location
delete[] domainDivision;
return hipSuccess;
}
int performJacobi_MultiGPU(unsigned int dim, unsigned int numJacobiIt, float* A0, float* A1, float* A2, float* A3,float* A4, float* rhs, float* x_in)
{
hipError_t cudaStatus = performMultiGPUJacobi(dim, numJacobiIt, &A0[0], &A1[0], &A2[0], &A3[0], &A4[0], &rhs[0], &x_in[0]);
if (cudaStatus != hipSuccess) {
cout << "Computation failed: " << endl;
return 1;
}
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
cout << "Cuda Device Reset failed: "<< endl;
return 1;
}
return 0;
}
| 825ee48fe4fe0f1b02c66023d174cfa34e943754.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "testMultiGPU_Jacobi.cuh"
#include <iostream>
#include <chrono>
#include <memory>
#include <vector>
using namespace std;
using namespace std::chrono;
#define IMUL(a,b) __mul24(a,b)
//cudaError_t performMultiGPUJacobi();
//make_unique fallback for pre-C++14 compilers on *nix
template<typename T, typename ...Args>
std::unique_ptr<T> make_unique(Args&& ...args)
{
return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
}
struct create_DeviceHalos
{
int deviceID;
vector<float> eHalo;
vector<float> wHalo;
vector<float> nHalo;
vector<float> sHalo;
};
//Simple Jacobi iteration
__global__ void jacobi_Simple(const float *A0, const float *A1, const float *A2, const float *A3, const float *A4, float *x_in, float *x_out, const float *rhs, float *nhalo, float *shalo, const int deviceID, const int numDevices)
{
int index = threadIdx.x + blockDim.x * blockIdx.x;
float result = rhs[index];
int dim_x = blockDim.x;// dim across x
int dim_y = gridDim.x;
int x_pos = blockIdx.x;
int y_pos = threadIdx.x;
//result = nhalo[y_pos];
//x_out[index] = result;
//Get the boundaries
int leftBoundaryElem = x_pos * (dim_x);
int rightBoundaryElem = (x_pos * dim_x) + (dim_x - 1);
int topBoundaryElem = y_pos + ((dim_y - 1) * (dim_x));
int bottomBoundaryElem = y_pos;
//Halo computation for 1D decomposition: the first and last GPUs only need a halo on one side (not both nhalo and shalo)
if (numDevices>1)
{
//First GPU
if (deviceID == 0) {
//We need to use nhalos
//Carry out computations for boundary elements
if (index != leftBoundaryElem)
//Left
result -= A1[index] * x_in[index - 1];
if (index != rightBoundaryElem)
//Right
result -= A3[index] * x_in[index + 1];
if (index != bottomBoundaryElem)
//Bottom
result -= A0[index] * x_in[index - dim_x];
if (index != topBoundaryElem)
//Top
result -= A4[index] * x_in[index + dim_x];
//The top boundary needs element from nhalo
if (index == topBoundaryElem)
//nHalos
result -= A4[index] * nhalo[y_pos];
result /= A2[index];
x_out[index] = result;
//Update Halo at the end of computation
if (index == topBoundaryElem)
//nHalos updated
nhalo[y_pos] = result;
return;
}
//Last GPU
else if (deviceID == (numDevices - 1)) {
//We need to use shalos
//Carry out computations for boundary elements
if (index != leftBoundaryElem)
//Left
result -= A1[index] * x_in[index - 1];
if (index != rightBoundaryElem)
//Right
result -= A3[index] * x_in[index + 1];
if (index != bottomBoundaryElem)
//Bottom
result -= A0[index] * x_in[index - dim_x];
//The Bottom boundary needs elements from shalo
if (index == bottomBoundaryElem)
//nHalos
result -= A0[index] * shalo[y_pos];
if (index != topBoundaryElem)
//Top
result -= A4[index] * x_in[index + dim_x];
result /= A2[index];
x_out[index] = result;
//Update Halo at the end of computation
if (index == bottomBoundaryElem)
//sHalos updated
shalo[y_pos] = result;
return;
}
//For all the middle GPUs
else
{
//We need to use both shalos and nhalos
//Carry out computations for boundary elements
if (index != leftBoundaryElem)
//Left
result -= A1[index] * x_in[index - 1];
if (index != rightBoundaryElem)
//Right
result -= A3[index] * x_in[index + 1];
if (index != bottomBoundaryElem)
//Bottom
result -= A0[index] * x_in[index - dim_x];
//The Bottom boundary needs elements from shalo
if (index == bottomBoundaryElem)
//nHalos
result -= A0[index] * shalo[y_pos];
if (index != topBoundaryElem)
//Top
result -= A4[index] * x_in[index + dim_x];
//The top boundary needs element from nhalo
if (index == topBoundaryElem)
//nHalos
result -= A4[index] * nhalo[y_pos];
result /= A2[index];
x_out[index] = result;
//Update Halo at the end of computation
if (index == bottomBoundaryElem)
//sHalos updated
shalo[y_pos] = result;
//Update Halo at the end of computation
if (index == topBoundaryElem)
//nHalos updated
nhalo[y_pos] = result;
return;
}
}
//For computations on a Machine with a single GPU
else
{
{//For some reason order of computation (left,right,top and bottom) gives a different result
//Carry out computations for boundary elements
if (index != leftBoundaryElem)
//Left
result -= A1[index] * x_in[index - 1];
if (index != rightBoundaryElem)
//Right
result -= A3[index] * x_in[index + 1];
if (index != bottomBoundaryElem)
//Bottom
result -= A0[index] * x_in[index - dim_x];
if (index != topBoundaryElem)
//Top
result -= A4[index] * x_in[index + dim_x];
result /= A2[index];
x_out[index] = result;
return;
}
}
}
//Init Halos: In 1D decomposition only North and South Halos are used. In 2D decomposition North, South, East and West Halo need to be initialized and computed
//In 3D decomposition North, South, East , West, Top and Bottom needs to be initialized and computed
void initHalos(int numDevices, vector<create_DeviceHalos> &deviceArray, int dim_x, float *vec_in) {
deviceArray.resize(numDevices);
int chunksize = ((dim_x*dim_x) / numDevices);
cout << "chunk size is :" << chunksize << endl;
for (int i = 0, pos = chunksize; i < numDevices; pos += chunksize, i++) {
deviceArray[i].deviceID = i;
deviceArray[i].nHalo.resize(dim_x);
//TODO: 2D halo exchange
//TODO: deviceArray[i].eHalo.resize(dim_x);
//TODO: deviceArray[i].wHalo.resize(dim_x);
deviceArray[i].sHalo.resize(dim_x);
if (numDevices == 1)
{
for (int count = 0; count<dim_x; count++)
{
deviceArray[i].nHalo[count] = 1.0f;
deviceArray[i].sHalo[count] = 1.0f;
}
return;
}
//First Device needs only nHalo
if (i == 0)
{
for (int k = pos, count = 0; count<dim_x; k++, count++)
{
cout << "Halo nPosition for first Device is : " << k << endl;
deviceArray[i].nHalo[count] = vec_in[k];
}
}
//Last device needs only sHalo
else if (i == (numDevices - 1))
{
for (int k = pos - (chunksize + dim_x), count = 0; count<dim_x; count++, k++)
{
cout << "Halo sPosition for Last Device is : " << k << endl;
deviceArray[i].sHalo[count] = vec_in[k];
}
}
//All the other devices need both sHalo and nHalo
else
{
for (int k = pos, count = 0; count<dim_x; count++, k++)
{
cout << "Halo nPosition for Mid Device " << i << " is : " << k << endl;
deviceArray[i].nHalo[count] = vec_in[k];
}
for (int k = pos - (chunksize + dim_x), count = 0; count<dim_x; count++, k++)
{
cout << "Halo sPosition for Mid Device " << i << " is : " << k << endl;
deviceArray[i].sHalo[count] = vec_in[k];
}
}
}
}
//Init matrix Diagonals A0, A1, A2, A3, A4
void copyValues(float *A0, float *A1, float *A2, float *A3, float *A4, float *rhs, float *vec_in, float *vec_out, int dim, float *val_A0, float *val_A1, float *val_A2, float *val_A3, float *val_A4, float *val_rhs, float *val_x_in)
{
unsigned int size = dim * dim;
for (unsigned int i = 0; i < size; i++)
{
A0[i] = val_A0[i];
A1[i] = val_A1[i];
A2[i] = val_A2[i];
A3[i] = val_A3[i];
A4[i] = val_A4[i];
rhs[i] = val_rhs[i];
vec_in[i] = val_x_in[i];
vec_out[i] = 0.0f;
}
}
void getAllDeviceProperties() {
int nDevices;
cudaGetDeviceCount(&nDevices);
for (int i = 0; i < nDevices; i++) {
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, i);
cout<<" Device Number: " << i <<endl;
cout<<" Device name: "<<prop.name<<endl;
cout<<" Memory Clock Rate (KHz): "<<prop.memoryClockRate<<endl;
cout<<" Memory Bus Width (bits): "<<prop.memoryBusWidth << endl;;
cout<<" Peak Memory Bandwidth (GB/s): "<<2.0*prop.memoryClockRate*(prop.memoryBusWidth / 8) / 1.0e6<<endl<<endl<<endl;
}
}
cudaError_t performMultiGPUJacobi(unsigned int val_dim, unsigned int numJacobiIt, float* val_A0, float* val_A1, float* val_A2, float* val_A3,float* val_A4, float* val_rhs, float* val_x_in)
{
//Fixed value changed later
int dim = 8;
if(val_dim!=0){
dim = val_dim;
}
//TODO: write a 2D domain decomposition method for more than 2 GPUs
int size = dim * dim;
//auto result = make_unique<float[]>(size);
//Create Diagonal Vectors
std::vector<float> a0(size);
std::vector<float> a1(size);
std::vector<float> a2(size);
std::vector<float> a3(size);
std::vector<float> a4(size);
std::vector<float> vec_in(size);
std::vector<float> vec_out(size);
std::vector<float> rhs(size);
std::vector<float> result(size);
//Used for exchanging the Halos after each Jacobi Iteration
std::vector<float> prev_nHalo(dim);
std::vector<float> curr_sHalo(dim);
//Get the total number of devices
int numDevices;
cudaGetDeviceCount(&numDevices);
cout << endl << "Total number of Devices in the System are : " << numDevices << endl;
getAllDeviceProperties();
//Configuring the number of GPU's manually
//numDevices=1;
copyValues(&a0[0], &a1[0], &a2[0], &a3[0], &a4[0], &rhs[0], &vec_in[0], &vec_out[0], dim, &val_A0[0], &val_A1[0], &val_A2[0], &val_A3[0], &val_A4[0], &val_rhs[0], &val_x_in[0]);
vector<create_DeviceHalos> deviceArray;
/* Distributed Computation using Halos: Algorithm
1. Init Halos.
1.a) In 1D decomposition, nhalo and shalo are initialized from vector x_in
1.b) In 2D decomposition, nhalo, shalo, ehalo and whalo are initialized from vector x_in
2. Pass the halos to the Jacobi kernel.
3. Store the result computed at the boundary into the halo boundary positions.
4. Swap nhalo and shalo pairs in 1D decomposition. Swap (nhalo,shalo) and (ehalo,whalo) in 2D.
*/
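//Added example (default dim = 8, 2 GPUs): chunksize = 32, so device 0 owns elements
//0..31 (rows 0-3) and device 1 owns elements 32..63 (rows 4-7). initHalos() seeds
//device 0's north halo with x_in[32..39] (the first row of device 1's chunk) and
//device 1's south halo with x_in[24..31] (the last row of device 0's chunk).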
initHalos(numDevices, deviceArray, dim, &vec_in[0]);
//Display Halos
if (numDevices>1) {
cout << endl << "Halo Init.." << endl;
for (int i = 0; i < numDevices; i++) {
cout << "Device ID: " << deviceArray[i].deviceID;
//First Device needs only nHalo
if (i == 0)
{
cout << "First Device";
for (int k = 0; k<dim; k++)
{
cout << deviceArray[i].nHalo[k];
}
}
//Last device needs only sHalo
else if (i == (numDevices - 1))
{
cout << "Last Device";
for (int k = 0; k<dim; k++)
{
cout << deviceArray[i].sHalo[k];
}
}
//All the other devices need both sHalo and nHalo
else
{
cout << "Middle Device";
for (int k = 0; k<dim; k++)
{
cout << deviceArray[i].nHalo[k];
}
for (int k = 0; k<dim; k++)
{
cout << deviceArray[i].sHalo[k];
}
}
cout << endl;
}
cout << endl;
cout << endl;
cout << endl;
}
cout << "A0 ....";
for (int i = 0; i < size; i++) {
cout << a0[i] << " ";
}
cout << endl;
cout << "A1 ....";
for (int i = 0; i < size; i++) {
cout << a1[i] << " ";
}
cout << endl;
cout << "A2 ....";
for (int i = 0; i < size; i++) {
cout << a2[i] << " ";
}
cout << endl;
cout << "A3 ....";
for (int i = 0; i < size; i++) {
cout << a3[i] << " ";
}
cout << endl;
cout << "A4 ....";
for (int i = 0; i < size; i++) {
cout << a4[i] << " ";
}
cout << endl;
cout << "RHS ....";
for (int i = 0; i < size; i++) {
cout << rhs[i] << " ";
}
cout << endl;
cout << "Vec In ...." << endl;
for (int i = size - 1; i >= 0; i--) {
if ((i + 1) % dim == 0) { cout << endl; }
cout << vec_in[i] << " ";
}
cout << endl;
cout << "Made it here..";
//Allocate memory on the devices
//Let the total number of GPU be 2 : has to be changed later
//Computation divided into (size/2) on first and size-(size/2) on second
int *domainDivision;
domainDivision = new int[numDevices];
//Logic for total chunk per device (Domain distribution)
for (int i = 0; i < numDevices; i++) {
//if(!(i==numDevices-1)){
domainDivision[i] = size / numDevices;
//size = (size - size / numDevices);
//}
}
//For use on Device
float *d_A0[4],
*d_A1[4],
*d_A2[4],
*d_A3[4],
*d_A4[4],
*d_Vec_In[4],
*d_Vec_Out[4],
*d_Rhs[4],
*d_nhalos[4],
*d_shalos[4];
/* The domain division is done in 1D rowise */
for (int dev = 0; dev<numDevices; dev++)
{
//Setting the device before allocation
cudaSetDevice(dev);
//cudamalloc the Diagonals
cudaMalloc((void**)&d_A0[dev], domainDivision[dev] * sizeof(float));
cudaMalloc((void**)&d_A1[dev], domainDivision[dev] * sizeof(float));
cudaMalloc((void**)&d_A2[dev], domainDivision[dev] * sizeof(float));
cudaMalloc((void**)&d_A3[dev], domainDivision[dev] * sizeof(float));
cudaMalloc((void**)&d_A4[dev], domainDivision[dev] * sizeof(float));
//cudamalloc the Input Vector and Result vector
cudaMalloc((void**)&d_Vec_In[dev], domainDivision[dev] * sizeof(float));
cudaMalloc((void**)&d_Vec_Out[dev], domainDivision[dev] * sizeof(float));
cudaMalloc((void**)&d_Rhs[dev], domainDivision[dev] * sizeof(float));
//cudaMalloc Halos: North and South--1D. TODO: East and West for 2D
cudaMalloc((void**)&d_nhalos[dev], dim * sizeof(float));
cudaMalloc((void**)&d_shalos[dev], dim * sizeof(float));
}
/* The transfer of Data from Host to Device */
for (int dev = 0, pos = 0; dev<numDevices; pos += domainDivision[dev], dev++)
{
//Select the device before copying
cudaSetDevice(dev);
//Copy the diagonals from host to device
cudaMemcpy(d_A0[dev], &a0[0] + pos, domainDivision[dev] * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_A1[dev], &a1[0] + pos, domainDivision[dev] * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_A2[dev], &a2[0] + pos, domainDivision[dev] * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_A3[dev], &a3[0] + pos, domainDivision[dev] * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_A4[dev], &a4[0] + pos, domainDivision[dev] * sizeof(float), cudaMemcpyHostToDevice);
//Copy in and out vectors and RHS
cudaMemcpy(d_Vec_In[dev], &vec_in[0] + pos, domainDivision[dev] * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_Vec_Out[dev], &vec_out[0] + pos, domainDivision[dev] * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_Rhs[dev], &rhs[0] + pos, domainDivision[dev] * sizeof(float), cudaMemcpyHostToDevice);
//Copy initial halos in 1D. TODO: extend beyond 1D
if (dev == 0) {
cudaMemcpy(d_nhalos[dev], &deviceArray[dev].nHalo[0], dim * sizeof(float), cudaMemcpyHostToDevice);
}
else if (dev == (numDevices - 1)) {
cudaMemcpy(d_shalos[dev], &deviceArray[dev].sHalo[0], dim * sizeof(float), cudaMemcpyHostToDevice);
}
else {
cudaMemcpy(d_nhalos[dev], &deviceArray[dev].nHalo[0], dim * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_shalos[dev], &deviceArray[dev].sHalo[0], dim * sizeof(float), cudaMemcpyHostToDevice);
}
}
if (auto err = cudaGetLastError())
{
cout << "Jacobi launch failed: " << cudaGetErrorString(err) << endl;
return err;
}
//multMatrix(d_A0, d_A1, d_A2, d_A3, d_A4, myDim, d_vec, d_res);
//Perform one Jacobi Step
int blocksize = dim / numDevices; //Number of blocks per device. TODO: extend to more than 2 GPUs
int threads = dim;
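//Added illustration (not in the original source): with the default dim = 8 and
//numDevices = 2 this launches <<<4, 8>>> on each device, i.e. 4 blocks of 8 threads
//covering that device's 32-element chunk; inside jacobi_Simple, blockIdx.x selects the
//row and threadIdx.x the column, so "blocksize" here is really the grid size.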
//Call to kernel
int iterations = 4;
if (numJacobiIt != 0) {
iterations = numJacobiIt;
}
for (int i = 0; i<iterations; i++)
{
//cout << endl << endl << "Iteration : " << i + 1 << endl << endl << endl;
//TODO: optimization using kernel instead of For Loop
for (int dev = 0, pos = 0; dev<numDevices; pos += domainDivision[dev], dev++)
{
cudaSetDevice(dev);
/*cout << endl << endl << "Kernal Execution on GPU : " << dev;
cout << endl << "Position :" << pos;
cout << endl << "Check Intermediate Result before it gets passed to kernal" << endl;
cudaMemcpy(&result[0] + pos, d_Vec_In[dev], domainDivision[dev] * sizeof(float), cudaMemcpyDeviceToHost);
for (int i = size - 1; i >= 0; i--) {
if ((i + 1) % dim == 0) { cout << endl; }
cout << "#pos:" << i << " " << result[i] << " ";
}*/
jacobi_Simple << <blocksize, threads >> >(d_A0[dev], d_A1[dev], d_A2[dev], d_A3[dev], d_A4[dev], d_Vec_In[dev], d_Vec_Out[dev], d_Rhs[dev], d_nhalos[dev], d_shalos[dev], deviceArray[dev].deviceID, numDevices);
//TODO: currently serial; should be done with cudaMemcpyAsync using CUDA streams
//Copy the intermediate result from Device to Host memory
cudaMemcpy(&result[0] + pos, d_Vec_Out[dev], domainDivision[dev] * sizeof(float), cudaMemcpyDeviceToHost);
//Copy the intermediate result from the Host memory to the Device memory
cudaMemcpy(d_Vec_In[dev], &result[0] + pos, domainDivision[dev] * sizeof(float), cudaMemcpyHostToDevice);
/* Store Halo positions after iteration for exchanging */
if (numDevices>1)
{
if (dev == 0) {
cudaMemcpy(&prev_nHalo[0], d_nhalos[dev], dim * sizeof(float), cudaMemcpyDeviceToHost);
}
else if (dev == (numDevices - 1)) {
//Exchange Happens here
cudaMemcpy(&curr_sHalo[0], d_shalos[dev], dim * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(d_shalos[dev], &prev_nHalo[0], dim * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_nhalos[dev - 1], &curr_sHalo[0], dim * sizeof(float), cudaMemcpyHostToDevice);
}
else {
//Exchange Happens here
cudaMemcpy(&curr_sHalo[0], d_shalos[dev], dim * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(d_shalos[dev], &prev_nHalo[0], dim * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_nhalos[dev - 1], &curr_sHalo[0], dim * sizeof(float), cudaMemcpyHostToDevice);
//Store current North Boundary in prev_halo for exchanging in later step
cudaMemcpy(&prev_nHalo[0], d_nhalos[dev], dim * sizeof(float), cudaMemcpyDeviceToHost);
}
}
}
//TODO: Using P2P to be done later
//exchangeHalos(numDevices,result, d_Vec_In);
//Exchange halo logic
//1. Prev = current nhalo
//2. On next iteration shalo = Prev and, Prev = nhalo.
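//Added example (3 GPUs): after dev 0's kernel, its updated north boundary is saved in
//prev_nHalo; when dev 1 finishes, its south boundary is copied to curr_sHalo, prev_nHalo
//becomes dev 1's new south halo, curr_sHalo becomes dev 0's new north halo, and dev 1's
//north boundary is saved in prev_nHalo for the exchange with dev 2 on the next pass.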
}
if (auto err = cudaGetLastError())
{
cout << "Jacobi launch failed: " << cudaGetErrorString(err) << endl;
return err;
}
cout << endl << "Iterations successful " << endl;
//Copy the final result from all devices
for (int dev = 0, pos = 0; dev < numDevices; pos += domainDivision[dev], dev++)
{
cudaMemcpy(&result[0] + pos, d_Vec_Out[dev], domainDivision[dev] * sizeof(float), cudaMemcpyDeviceToHost);
}
if (auto err = cudaGetLastError())
{
cout << "Jacobi launch failed: " << cudaGetErrorString(err) << endl;
return err;
}
//Print result
for (int i = size - 1; i >= 0; i--) {
if ((i + 1) % dim == 0) { cout << endl; }
cout << result[i] << " ";
}
// Freeing memory auto done by cuda deleter
//Free memory on devices
for (int dev = 0; dev<numDevices; dev++)
{
cudaFree(d_A0[dev]);
cudaFree(d_A1[dev]);
cudaFree(d_A2[dev]);
cudaFree(d_A3[dev]);
cudaFree(d_A4[dev]);
cudaFree(d_Vec_In[dev]);
cudaFree(d_Vec_Out[dev]);
cudaFree(d_nhalos[dev]);
cudaFree(d_shalos[dev]);
cudaFree(d_Rhs[dev]);
}
cout << endl << "Device Memory free successful.";
//Take care of dynamic mem location
delete[] domainDivision;
return cudaSuccess;
}
int performJacobi_MultiGPU(unsigned int dim, unsigned int numJacobiIt, float* A0, float* A1, float* A2, float* A3,float* A4, float* rhs, float* x_in)
{
cudaError_t cudaStatus = performMultiGPUJacobi(dim, numJacobiIt, &A0[0], &A1[0], &A2[0], &A3[0], &A4[0], &rhs[0], &x_in[0]);
if (cudaStatus != cudaSuccess) {
cout << "Computation failed: " << endl;
return 1;
}
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
cout << "Cuda Device Reset failed: "<< endl;
return 1;
}
return 0;
}
|
5575fe39dc7d7d076244529985ede79cacdcc8f3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Device code for vector reduction.
Author: Naga Kandasamy
Date modified: 02/14/2017
*/
#ifndef _SCAN_NAIVE_KERNEL_H_
#define _SCAN_NAIVE_KERNEL_H_
#define NUM_ELEMENTS 512
// This kernel performs reduction using a tree-style reduction technique that increases divergent branching between threads in a warp
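// Added note: at stride 1 only the even-numbered threads add, at stride 2 every fourth
// thread, and so on, so each warp keeps a mix of active and idle lanes (divergent
// branches) for most of the loop even though the amount of work halves on every pass.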
__global__ void reduction_v1(float *g_data, int n)
{
__shared__ float partialSum[NUM_ELEMENTS];
// Find our place in thread block/grid
unsigned int threadID = threadIdx.x;
unsigned int dataID = blockIdx.x * blockDim.x + threadIdx.x;
// Populate shared memory with data from global memory
if(dataID < n)
partialSum[threadID] = g_data[dataID];
else
partialSum[threadID] = 0.0;
__syncthreads();
// Calculate partial sum
for (unsigned int stride = 1; stride < blockDim.x; stride *= 2){
if (threadID % (2 * stride) == 0)
partialSum[threadID] += partialSum[threadID + stride];
__syncthreads();
}
// Store result in the appropriate place in the output stream
if (threadID == 0)
g_data[blockIdx.x] = partialSum[0];
}
// This kernel performs reduction in a fashion that reduces divergent branching between threads in a warp
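// Added note: here the active threads at each stride form the contiguous range
// 0..stride-1, so warps are either fully active or fully idle until the stride falls
// below the warp size; only the first warp (threads 0-31) branches divergently, and
// only in the final steps.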
__global__ void reduction_v2(float *g_data, int n)
{
__shared__ float partialSum[NUM_ELEMENTS];
// Find our place in thread block/grid
unsigned int threadID = threadIdx.x;
unsigned int dataID = blockIdx.x * blockDim.x + threadIdx.x;
// Copy data to shared memory from global memory
if(dataID < n)
partialSum[threadID] = g_data[dataID];
else
partialSum[threadID] = 0.0;
__syncthreads();
for(unsigned int stride = blockDim.x >> 1; stride > 0; stride = stride >> 1){
if(threadID < stride)
partialSum[threadID] += partialSum[threadID + stride];
__syncthreads();
}
// Store result in the appropriate place in the output stream
if(threadID == 0)
g_data[blockIdx.x] = partialSum[0];
}
#endif // #ifndef _SCAN_NAIVE_KERNEL_H_
| 5575fe39dc7d7d076244529985ede79cacdcc8f3.cu |
/*
Device code for vector reduction.
Author: Naga Kandasamy
Date modified: 02/14/2017
*/
#ifndef _SCAN_NAIVE_KERNEL_H_
#define _SCAN_NAIVE_KERNEL_H_
#define NUM_ELEMENTS 512
// This kernel performs reduction using a tree-style reduction technique that increases divergent branching between threads in a warp
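// Added note: at stride 1 only the even-numbered threads add, at stride 2 every fourth
// thread, and so on, so each warp keeps a mix of active and idle lanes (divergent
// branches) for most of the loop even though the amount of work halves on every pass.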
__global__ void reduction_v1(float *g_data, int n)
{
__shared__ float partialSum[NUM_ELEMENTS];
// Find our place in thread block/grid
unsigned int threadID = threadIdx.x;
unsigned int dataID = blockIdx.x * blockDim.x + threadIdx.x;
// Populate shared memory with data from global memory
if(dataID < n)
partialSum[threadID] = g_data[dataID];
else
partialSum[threadID] = 0.0;
__syncthreads();
// Calculate partial sum
for (unsigned int stride = 1; stride < blockDim.x; stride *= 2){
if (threadID % (2 * stride) == 0)
partialSum[threadID] += partialSum[threadID + stride];
__syncthreads();
}
// Store result in the appropriate place in the output stream
if (threadID == 0)
g_data[blockIdx.x] = partialSum[0];
}
// This kernel performs reduction in a fashion that reduces divergent branching between threads in a warp
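// Added note: here the active threads at each stride form the contiguous range
// 0..stride-1, so warps are either fully active or fully idle until the stride falls
// below the warp size; only the first warp (threads 0-31) branches divergently, and
// only in the final steps.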
__global__ void reduction_v2(float *g_data, int n)
{
__shared__ float partialSum[NUM_ELEMENTS];
// Find our place in thread block/grid
unsigned int threadID = threadIdx.x;
unsigned int dataID = blockIdx.x * blockDim.x + threadIdx.x;
// Copy data to shared memory from global memory
if(dataID < n)
partialSum[threadID] = g_data[dataID];
else
partialSum[threadID] = 0.0;
__syncthreads();
for(unsigned int stride = blockDim.x >> 1; stride > 0; stride = stride >> 1){
if(threadID < stride)
partialSum[threadID] += partialSum[threadID + stride];
__syncthreads();
}
// Store result in the appropriate place in the output stream
if(threadID == 0)
g_data[blockIdx.x] = partialSum[0];
}
#endif // #ifndef _SCAN_NAIVE_KERNEL_H_
|
c0c6f95d79be45b1bd0c4ed643f37d77c82d58f0.hip | // !!! This is a file automatically generated by hipify!!!
//============================================================
// STUDENT NAME: Lai Zhin Hou Darryl
// MATRIC NO. : A0122534R
// NUS EMAIL : [email protected]
// COMMENTS TO GRADER:
// <comments to grader, if any>
//
// ============================================================
//
// FILE: unique.cu
// Include files from C standard library.
#include <stdlib.h>
#include <stdio.h>
#include <string.h> // For memcpy().
#include <math.h>
// Includes CUDA.
#include <hip/hip_runtime.h>
// Includes helper functions from CUDA Samples SDK.
#include <helper_cuda.h>
#include <helper_functions.h> // helper functions for SDK examples.
// Include files to use Thrust (a C++ template library for CUDA).
// Thrust v1.7.0 is automatically installed with CUDA Toolkit 6.5.
// Read more about Thrust at the GitHub Thrust project page
// (http://thrust.github.com/).
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <thrust/scan.h>
/////////////////////////////////////////////////////////////////////////////
// CONSTANTS & GLOBAL VARIABLES
/////////////////////////////////////////////////////////////////////////////
#define NUM_ELEMS (5*1000000) // Number of elements in input array.
#define BLOCK_SIZE 256
#define NUM_BLOCKS ( ( (NUM_ELEMS) + (BLOCK_SIZE) - 1 ) / (BLOCK_SIZE) )
#define ELEM_MIN 1 // Minimum value in input array (must not be negative).
#define ELEM_MAX 100000 // Maximum value in input array (must not be negative).
//===========================================================================
// CUDA kernel used by GPU_Unique().
//
// Given an input sorted integer array, the kernel marks in the output array
// which elements of the input array should be kept/removed, so that if these
// elements were to be kept/removed, there would be no duplicate elements in
// the sorted array. We want to remove as few elements as possible from the
// input array.
//
// The output of the kernel is an array of 1's and 0's to indicate whether
// the corresponding elements in the input array should be kept or removed --
// a 1 means keep, and 0 means remove. The output array has the same number
// of elements as the input array.
//
// For example, given the following input array
//
// inSortedArray[] = [ 1 1 3 3 3 5 5 7 8 8 ]
//
// the output would be
//
// outSelectionArray[] = [ 1 0 1 0 0 1 0 1 1 0 ]
//
// so that if we keep only those elements in the input array that have a 1
// in the corresponding location in the output array, we will have the
// result [ 1 3 5 7 8 ].
//
// NOTE: You should use shared memory to minimize the number of uncoalesced
// global memory accesses. Shared memory conflicts must be minimized too.
//===========================================================================
__global__ void Kernel_MarkUnique( int *inSortedArray, int *outSelectionArray,
int numElems )
{
//***********************************************
//*********** WRITE YOUR CODE HERE **************
//***********************************************
__shared__ int sorted[ BLOCK_SIZE + 1 ]; // extra slot 0 holds the last element of the previous block
int tid = blockIdx.x * (BLOCK_SIZE) + threadIdx.x;
int tx = threadIdx.x + 1;
int selection = 0;
if ( (tid >= 0) && (tid < numElems)) {
if (tid != 0 && tx == 1) {
sorted[0] = inSortedArray[tid-1];
}
sorted[tx] = inSortedArray[tid]; // Add data to shared memory
__syncthreads();
if ( tid==0 ||
((tx != 0)&&( sorted[tx] > sorted[tx-1])) ) {
selection = 1;
}
outSelectionArray[tid] = selection;
}
//*/
}
//===========================================================================
// CUDA kernel used by GPU_Unique().
//
// The kernel copies a selected set of elements from the input array to
// specified locations in the output array.
//
// For an input element inArray[i], if selectionArray[i] is 1, then
// the input element is copied to the output array outArray[].
// The location in the output array it is copied to is
// scatterAddressArray[i] + addressOffset.
//
// You can assume that no two elements in the input array inArray[]
// will be selected and copied to the same location in the output
// array outArray[].
//
// NOTE: You do not need to use shared memory, but try to keep the
// number of uncoalesced global memory accesses to the minimal.
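// Added example, reusing the example arrays from this file's other comments: with
// inArray = [ 1 1 3 3 3 5 5 7 8 8 ], selectionArray = [ 1 0 1 0 0 1 0 1 1 0 ],
// scatterAddressArray = [ 1 1 2 2 2 3 3 4 5 5 ] and addressOffset = -1, the selected
// elements are written to outArray[0..4] = [ 1 3 5 7 8 ].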
//===========================================================================
__global__ void Kernel_Scatter( int *inArray, int *selectionArray,
int *scatterAddressArray, int addressOffset,
int *outArray, int numElems )
{
//***********************************************
//*********** WRITE YOUR CODE HERE **************
//***********************************************
int tid = blockIdx.x * (blockDim.x) + threadIdx.x;
if (tid < numElems && selectionArray[tid]==1) {
outArray[scatterAddressArray[tid] + addressOffset] = inArray[tid];
}
}
//===========================================================================
// Used by GPU_Unique().
//
// Use Thrust's sort algorithm to sort the input integer array on the GPU,
// in non-decreasing order. The sort is performed in-place,
//
// NOTE:
// * The input/output array is already allocated in the device memory.
//===========================================================================
static void GPU_SortIntegerArray( int *d_inoutArray, int numElems )
{
thrust::device_ptr<int> dev_ptr( d_inoutArray );
thrust::sort( dev_ptr, dev_ptr + numElems );
}
//===========================================================================
// Used by GPU_Unique().
//
// Use Thrust's scan algorithm to compute the "inclusive" all-prefix sums on the GPU.
// Also produces the sum of all elements in the input array in the output
// parameter *h_outInArraySum.
//
// NOTE: The input and output arrays are already allocated in the device memory.
//===========================================================================
static void GPU_AllPrefixSums( int *d_inArray, int *d_outArray, int numElems,
int *h_outInArraySum )
{
thrust::device_ptr<int> in_dev_ptr( d_inArray );
thrust::device_ptr<int> out_dev_ptr( d_outArray );
thrust::inclusive_scan( in_dev_ptr, in_dev_ptr + numElems, out_dev_ptr );
// Get the sum of all the elements in the input array. This can be obtained
// from the last element in the all-prefix-sums array.
checkCudaErrors( hipMemcpy( h_outInArraySum, d_outArray + numElems - 1,
sizeof(int), hipMemcpyDeviceToHost ) );
// Using Thrust, the above memory copy can be written as:
// *h_outInArraySum = out_dev_ptr[ numElems - 1 ];
}
//===========================================================================
// GPU version.
//
// Given an input integer array, the function produces an output array
// which is a sorted version of the input array, but with duplicate
// elements removed. The output array is sorted in non-decreasing order.
// The function also produces the number of unique elements in the
// output array in the parameter (*numUniqueElems).
//
// For example, if the input array is [ 5 3 7 5 8 3 1 3 1 8 ], the
// output array would be [ 1 3 5 7 8 ].
//
// When this function is called, sufficient memory storage must have
// already been allocated for the output array. The safest is to allocate
// as much memory as for the input array.
//
// Here, a scan-and-scatter approach is used to do the stream compaction
// on the GPU. The following example demonstrates the steps.
//
// (0) Input array:
// inputArray[] = [ 5 3 7 5 8 3 1 3 1 8 ]
//
// (1) Sort inputArray[]:
// sortedArray[] = [ 1 1 3 3 3 5 5 7 8 8 ]
//
// (2) Mark the unique elements in sortedArray[]:
// selectionArray[] = [ 1 0 1 0 0 1 0 1 1 0 ]
//
// (3) Scan selectionArray[] ("inclusive" all-prefix sums):
// scatterAddressArray[] = [ 1 1 2 2 2 3 3 4 5 5 ]
//
// (4) Scatter sortedArray[] into outputArray[] using scatterAddressArray[] - 1:
// outputArray[] = [ 1 3 5 7 8 ]
//
// Note that the number of unique elements in the output array is the
// value of the last element in scatterAddressArray[].
//
// IMPORTANT: Step (1) to (4) must be computed on the GPU.
//
//===========================================================================
static void GPU_Unique( const int inputArray[], int numInputElems,
int outputArray[], int *numUniqueElems )
{
if ( numInputElems < 1 )
{
(*numUniqueElems) = 0;
return;
}
//---------------------------------------------------------------------------
// Allocate device memory and copy input array from host memory to
// device memory.
//---------------------------------------------------------------------------
// Allocate device memory.
int *d_sortedArray, *d_selectionArray, *d_scatterAddressArray, *d_outputArray;
checkCudaErrors( hipMalloc( (void**) &d_sortedArray, numInputElems * sizeof(int) ) );
checkCudaErrors( hipMalloc( (void**) &d_selectionArray, numInputElems * sizeof(int) ) );
checkCudaErrors( hipMalloc( (void**) &d_scatterAddressArray, numInputElems * sizeof(int) ) );
checkCudaErrors( hipMalloc( (void**) &d_outputArray, numInputElems * sizeof(int) ) );
// Will contain the number of unique elements in the output array.
int numSelectedElems = 0;
// Copy host input array to device memory.
checkCudaErrors( hipMemcpy( d_sortedArray, inputArray, numInputElems * sizeof(int),
hipMemcpyHostToDevice ) );
//---------------------------------------------------------------------------
// Do Step (1) to (4).
//---------------------------------------------------------------------------
//***********************************************
//*********** WRITE YOUR CODE HERE **************
//***********************************************
GPU_SortIntegerArray( d_sortedArray, numInputElems ); // 1
hipLaunchKernelGGL(( Kernel_MarkUnique), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, 0, d_sortedArray, d_selectionArray, numInputElems ); // 2
GPU_AllPrefixSums(d_selectionArray, d_scatterAddressArray, numInputElems, &numSelectedElems ); // 3
hipLaunchKernelGGL(( Kernel_Scatter), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, 0, d_sortedArray, d_selectionArray, d_scatterAddressArray, -1, d_outputArray, numInputElems ); // 4
//---------------------------------------------------------------------------
// Copy the final result from the device memory to the host memory.
//---------------------------------------------------------------------------
checkCudaErrors( hipMemcpy( outputArray, d_outputArray, numSelectedElems * sizeof(int),
hipMemcpyDeviceToHost ) );
(*numUniqueElems) = numSelectedElems;
//---------------------------------------------------------------------------
// Clean up.
//---------------------------------------------------------------------------
// Free device memory.
checkCudaErrors( hipFree( d_sortedArray ) );
checkCudaErrors( hipFree( d_selectionArray ) );
checkCudaErrors( hipFree( d_scatterAddressArray ) );
checkCudaErrors( hipFree( d_outputArray ) );
}
//===========================================================================
// Quicksort to sort the input integer array in-place in ascending order.
// To sort the entire input array, call Quicksort(array, 0, numElems-1).
//===========================================================================
#define SWAP(x, y, t) ((t)=(x),(x)=(y),(y)=(t))
static void Quicksort( int a[], int first, int last )
{
int tmp; // Temporary variable for SWAP.
if( first < last )
{
int pivot = a[first];
int i = first - 1;
int j = last + 1;
while( true )
{
do { j--; } while ( a[j] > pivot );
do { i++; } while ( a[i] < pivot );
if( i < j )
SWAP( a[i], a[j], tmp );
else
break;
}
Quicksort( a, first, j );
Quicksort( a, j + 1, last );
}
}
#undef SWAP
//===========================================================================
// CPU version.
//
// Given an input integer array, the function produces an output array
// which is a sorted version of the input array, but with duplicate
// elements removed. The output array is sorted in non-decreasing order.
// The function also produces the number of unique elements in the
// output array in the parameter (*numUniqueElems).
//
// When this function is called, sufficient memory storage must have
// already been allocated for the output array. The safest is to allocate
// as much memory as for the input array.
//===========================================================================
static void CPU_Unique( const int inputArray[], int numInputElems,
int outputArray[], int *numUniqueElems )
{
if ( numInputElems < 1 )
{
(*numUniqueElems) = 0;
return;
}
int *sortedArray = (int *) malloc( numInputElems * sizeof(int) );
memcpy( sortedArray, inputArray, numInputElems * sizeof(int) );
Quicksort( sortedArray, 0, numInputElems - 1 );
outputArray[0] = sortedArray[0];
int uniqueCount = 1;
for ( int i = 1; i < numInputElems; i++ )
if ( sortedArray[i] != sortedArray[i-1] )
outputArray[ uniqueCount++ ] = sortedArray[i];
(*numUniqueElems) = uniqueCount;
}
//===========================================================================
// Generates a set of random integers, each has value from elemMin to
// elemMax, and put them in the array intArray[].
//===========================================================================
static void GenerateRandomIntegers( int intArray[], int numElems, int elemMin, int elemMax )
{
for ( int i = 0; i < numElems; i++ )
{
int rand32 = rand() * (RAND_MAX + 1) + rand();
intArray[i] = rand32 % (elemMax - elemMin + 1) + elemMin;
}
}
//===========================================================================
// Return true iff all corresponding elements in the int
// arrays A and B are equal.
//===========================================================================
static bool IntArrayEqual( const int A[], const int B[], int numElems )
{
for ( int i = 0; i < numElems; i++ )
if ( A[i] != B[i] ) return false;
return true;
}
void WaitForEnterKeyBeforeExit( void )
{
fflush( stdin );
getchar();
}
//===========================================================================
// The main function
//===========================================================================
int main(int argc, char** argv)
{
atexit( WaitForEnterKeyBeforeExit );
// Set seed for rand().
srand( 927 );
// Use command-line specified CUDA device, otherwise use device with highest Gflops/s.
int devID = findCudaDevice( argc, (const char **)argv );
// Create a timer.
StopWatchInterface *timer = 0;
sdkCreateTimer( &timer );
//---------------------------------------------------------------------------
// Allocate host memory and generate test data.
//---------------------------------------------------------------------------
// Allocate host memory for input integer array.
int *inputArray = (int *) malloc( NUM_ELEMS * sizeof(int) );
// Allocate host memory for result arrays.
int *cpu_uniqueArray = (int *) malloc( NUM_ELEMS * sizeof(int) );
int *gpu_uniqueArray = (int *) malloc( NUM_ELEMS * sizeof(int) );
// Number of unique elements in input array computed by different methods.
int cpu_numUniqueElems = 0;
int gpu_numUniqueElems = 0;
// Fill the input array with random integers.
GenerateRandomIntegers( inputArray, NUM_ELEMS, ELEM_MIN, ELEM_MAX );
//---------------------------------------------------------------------------
// Print some program parameter values.
//---------------------------------------------------------------------------
printf( "NUM_ELEMS = %d\n", NUM_ELEMS );
printf( "BLOCK_SIZE = %d\n", BLOCK_SIZE );
printf( "NUM_BLOCKS = %d\n", NUM_BLOCKS );
printf( "ELEM_MIN = %d\n", ELEM_MIN );
printf( "ELEM_MAX = %d\n", ELEM_MAX );
printf( "\n\n" );
//---------------------------------------------------------------------------
// Perform computation on CPU.
//---------------------------------------------------------------------------
printf( "CPU COMPUTATION:\n" );
// Reset and start timer.
sdkResetTimer( &timer );
sdkStartTimer( &timer );
// Compute on CPU.
CPU_Unique( inputArray, NUM_ELEMS, cpu_uniqueArray, &cpu_numUniqueElems );
// Stop timer.
sdkStopTimer( &timer );
printf( "Processing time = %.3f ms\n", sdkGetTimerValue( &timer ) );
// Print some results.
printf( "Number of unique elements = %d\n", cpu_numUniqueElems );
printf( "\n\n" );
//---------------------------------------------------------------------------
// Perform computation on GPU.
//---------------------------------------------------------------------------
printf( "GPU COMPUTATION:\n" );
// Reset and start timer.
sdkResetTimer( &timer );
sdkStartTimer( &timer );
// Compute on GPU.
GPU_Unique( inputArray, NUM_ELEMS, gpu_uniqueArray, &gpu_numUniqueElems );
// Stop timer.
sdkStopTimer( &timer );
printf( "Processing time = %.3f ms\n", sdkGetTimerValue( &timer ) );
// Print some results.
printf( "Number of unique elements = %d\n", gpu_numUniqueElems );
printf( "\n" );
// Check result with reference result computed by CPU.
bool equal = ( gpu_numUniqueElems == cpu_numUniqueElems ) &&
IntArrayEqual( cpu_uniqueArray, gpu_uniqueArray, cpu_numUniqueElems );
printf( "Verify GPU result... %s\n", (equal)? "PASS" : "FAIL" );
printf( "\n\n" );
//---------------------------------------------------------------------------
// Clean up.
//---------------------------------------------------------------------------
// Destroy the timer.
sdkDeleteTimer( &timer );
// Free up memory.
free( inputArray );
free( cpu_uniqueArray );
free( gpu_uniqueArray );
hipDeviceReset();
}
| c0c6f95d79be45b1bd0c4ed643f37d77c82d58f0.cu | //============================================================
// STUDENT NAME: Lai Zhin Hou Darryl
// MATRIC NO. : A0122534R
// NUS EMAIL : [email protected]
// COMMENTS TO GRADER:
// <comments to grader, if any>
//
// ============================================================
//
// FILE: unique.cu
// Include files from C standard library.
#include <stdlib.h>
#include <stdio.h>
#include <string.h> // For memcpy().
#include <math.h>
// Includes CUDA.
#include <cuda_runtime.h>
// Includes helper functions from CUDA Samples SDK.
#include <helper_cuda.h>
#include <helper_functions.h> // helper functions for SDK examples.
// Include files to use Thrust (a C++ template library for CUDA).
// Thrust v1.7.0 is automatically installed with CUDA Toolkit 6.5.
// Read more about Thrust at the GitHub Thrust project page
// (http://thrust.github.com/).
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <thrust/scan.h>
/////////////////////////////////////////////////////////////////////////////
// CONSTANTS & GLOBAL VARIABLES
/////////////////////////////////////////////////////////////////////////////
#define NUM_ELEMS (5*1000000) // Number of elements in input array.
#define BLOCK_SIZE 256
#define NUM_BLOCKS ( ( (NUM_ELEMS) + (BLOCK_SIZE) - 1 ) / (BLOCK_SIZE) )
#define ELEM_MIN 1 // Minimum value in input array (must not be negative).
#define ELEM_MAX 100000 // Maximum value in input array (must not be negative).
//===========================================================================
// CUDA kernel used by GPU_Unique().
//
// Given an input sorted integer array, the kernel marks in the output array
// which elements of the input array should be kept/removed, so that if these
// elements were to be kept/removed, there would be no duplicate elements in
// the sorted array. We want to remove as few elements as possible from the
// input array.
//
// The output of the kernel is an array of 1's and 0's to indicate whether
// the corresponding elements in the input array should be kept or removed --
// a 1 means keep, and 0 means remove. The output array has the same number
// of elements as the input array.
//
// For example, given the following input array
//
// inSortedArray[] = [ 1 1 3 3 3 5 5 7 8 8 ]
//
// the output would be
//
// outSelectionArray[] = [ 1 0 1 0 0 1 0 1 1 0 ]
//
// so that if we keep only those elements in the input array that have a 1
// in the corresponding location in the output array, we will have the
// result [ 1 3 5 7 8 ].
//
// NOTE: You should use shared memory to minimize the number of uncoalesced
// global memory accesses. Shared memory conflicts must be minimized too.
//===========================================================================
__global__ void Kernel_MarkUnique( int *inSortedArray, int *outSelectionArray,
int numElems )
{
//***********************************************
//*********** WRITE YOUR CODE HERE **************
//***********************************************
__shared__ int sorted[ BLOCK_SIZE + 1 ]; // extra slot 0 holds the last element of the previous block
int tid = blockIdx.x * (BLOCK_SIZE) + threadIdx.x;
int tx = threadIdx.x + 1;
int selection = 0;
if ( (tid >= 0) && (tid < numElems)) {
if (tid != 0 && tx == 1) {
sorted[0] = inSortedArray[tid-1];
}
sorted[tx] = inSortedArray[tid]; // Add data to shared memory
__syncthreads();
if ( tid==0 ||
((tx != 0)&&( sorted[tx] > sorted[tx-1])) ) {
selection = 1;
}
outSelectionArray[tid] = selection;
}
//*/
}
//===========================================================================
// CUDA kernel used by GPU_Unique().
//
// The kernel copies a selected set of elements from the input array to
// specified locations in the output array.
//
// For an input element inArray[i], if selectionArray[i] is 1, then
// the input element is copied to the output array outArray[].
// The location in the output array it is copied to is
// scatterAddressArray[i] + addressOffset.
//
// You can assume that no two elements in the input array inArray[]
// will be selected and copied to the same location in the output
// array outArray[].
//
// NOTE: You do not need to use shared memory, but try to keep the
// number of uncoalesced global memory accesses to the minimal.
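// Added example, reusing the example arrays from this file's other comments: with
// inArray = [ 1 1 3 3 3 5 5 7 8 8 ], selectionArray = [ 1 0 1 0 0 1 0 1 1 0 ],
// scatterAddressArray = [ 1 1 2 2 2 3 3 4 5 5 ] and addressOffset = -1, the selected
// elements are written to outArray[0..4] = [ 1 3 5 7 8 ].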
//===========================================================================
__global__ void Kernel_Scatter( int *inArray, int *selectionArray,
int *scatterAddressArray, int addressOffset,
int *outArray, int numElems )
{
//***********************************************
//*********** WRITE YOUR CODE HERE **************
//***********************************************
int tid = blockIdx.x * (blockDim.x) + threadIdx.x;
if (tid < numElems && selectionArray[tid]==1) {
outArray[scatterAddressArray[tid] + addressOffset] = inArray[tid];
}
}
//===========================================================================
// Used by GPU_Unique().
//
// Use Thrust's sort algorithm to sort the input integer array on the GPU,
// in non-decreasing order. The sort is performed in-place,
//
// NOTE:
// * The input/output array is already allocated in the device memory.
//===========================================================================
static void GPU_SortIntegerArray( int *d_inoutArray, int numElems )
{
thrust::device_ptr<int> dev_ptr( d_inoutArray );
thrust::sort( dev_ptr, dev_ptr + numElems );
}
//===========================================================================
// Used by GPU_Unique().
//
// Use Thrust's scan algorithm to compute the "inclusive" all-prefix sums on the GPU.
// Also produces the sum of all elements in the input array in the output
// parameter *h_outInArraySum.
//
// NOTE: The input and output arrays are already allocated in the device memory.
//===========================================================================
static void GPU_AllPrefixSums( int *d_inArray, int *d_outArray, int numElems,
int *h_outInArraySum )
{
thrust::device_ptr<int> in_dev_ptr( d_inArray );
thrust::device_ptr<int> out_dev_ptr( d_outArray );
thrust::inclusive_scan( in_dev_ptr, in_dev_ptr + numElems, out_dev_ptr );
// Get the sum of all the elements in the input array. This can be obtained
// from the last element in the all-prefix-sums array.
checkCudaErrors( cudaMemcpy( h_outInArraySum, d_outArray + numElems - 1,
sizeof(int), cudaMemcpyDeviceToHost ) );
// Using Thrust, the above memory copy can be written as:
// *h_outInArraySum = out_dev_ptr[ numElems - 1 ];
}
//===========================================================================
// GPU version.
//
// Given an input integer array, the function produces an output array
// which is a sorted version of the input array, but with duplicate
// elements removed. The output array is sorted in non-decreasing order.
// The function also produces the number of unique elements in the
// output array in the parameter (*numUniqueElems).
//
// For example, if the input array is [ 5 3 7 5 8 3 1 3 1 8 ], the
// output array would be [ 1 3 5 7 8 ].
//
// When this function is called, sufficient memory storage must have
// already been allocated for the output array. The safest is to allocate
// as much memory as for the input array.
//
// Here, a scan-and-scatter approach is used to do the stream compaction
// on the GPU. The following example demonstrates the steps.
//
// (0) Input array:
// inputArray[] = [ 5 3 7 5 8 3 1 3 1 8 ]
//
// (1) Sort inputArray[]:
// sortedArray[] = [ 1 1 3 3 3 5 5 7 8 8 ]
//
// (2) Mark the unique elements in sortedArray[]:
// selectionArray[] = [ 1 0 1 0 0 1 0 1 1 0 ]
//
// (3) Scan selectionArray[] ("inclusive" all-prefix sums):
// scatterAddressArray[] = [ 1 1 2 2 2 3 3 4 5 5 ]
//
// (4) Scatter sortedArray[] into outputArray[] using scatterAddressArray[] - 1:
// outputArray[] = [ 1 3 5 7 8 ]
//
// Note that the number of unique elements in the output array is the
// value of the last element in scatterAddressArray[].
//
// IMPORTANT: Step (1) to (4) must be computed on the GPU.
//
//===========================================================================
static void GPU_Unique( const int inputArray[], int numInputElems,
int outputArray[], int *numUniqueElems )
{
if ( numInputElems < 1 )
{
(*numUniqueElems) = 0;
return;
}
//---------------------------------------------------------------------------
// Allocate device memory and copy input array from host memory to
// device memory.
//---------------------------------------------------------------------------
// Allocate device memory.
int *d_sortedArray, *d_selectionArray, *d_scatterAddressArray, *d_outputArray;
checkCudaErrors( cudaMalloc( (void**) &d_sortedArray, numInputElems * sizeof(int) ) );
checkCudaErrors( cudaMalloc( (void**) &d_selectionArray, numInputElems * sizeof(int) ) );
checkCudaErrors( cudaMalloc( (void**) &d_scatterAddressArray, numInputElems * sizeof(int) ) );
checkCudaErrors( cudaMalloc( (void**) &d_outputArray, numInputElems * sizeof(int) ) );
// Will contain the number of unique elements in the output array.
int numSelectedElems = 0;
// Copy host input array to device memory.
checkCudaErrors( cudaMemcpy( d_sortedArray, inputArray, numInputElems * sizeof(int),
cudaMemcpyHostToDevice ) );
//---------------------------------------------------------------------------
// Do Step (1) to (4).
//---------------------------------------------------------------------------
//***********************************************
//*********** WRITE YOUR CODE HERE **************
//***********************************************
GPU_SortIntegerArray( d_sortedArray, numInputElems ); // 1
Kernel_MarkUnique<<<NUM_BLOCKS, BLOCK_SIZE>>>( d_sortedArray, d_selectionArray, numInputElems ); // 2
GPU_AllPrefixSums(d_selectionArray, d_scatterAddressArray, numInputElems, &numSelectedElems ); // 3
Kernel_Scatter<<<NUM_BLOCKS, BLOCK_SIZE>>>( d_sortedArray, d_selectionArray, d_scatterAddressArray, -1, d_outputArray, numInputElems ); // 4
//---------------------------------------------------------------------------
// Copy the final result from the device memory to the host memory.
//---------------------------------------------------------------------------
checkCudaErrors( cudaMemcpy( outputArray, d_outputArray, numSelectedElems * sizeof(int),
cudaMemcpyDeviceToHost ) );
(*numUniqueElems) = numSelectedElems;
//---------------------------------------------------------------------------
// Clean up.
//---------------------------------------------------------------------------
// Free device memory.
checkCudaErrors( cudaFree( d_sortedArray ) );
checkCudaErrors( cudaFree( d_selectionArray ) );
checkCudaErrors( cudaFree( d_scatterAddressArray ) );
checkCudaErrors( cudaFree( d_outputArray ) );
}
//===========================================================================
// Quicksort to sort the input integer array in-place in ascending order.
// To sort the entire input array, call Quicksort(array, 0, numElems-1).
//===========================================================================
#define SWAP(x, y, t) ((t)=(x),(x)=(y),(y)=(t))
static void Quicksort( int a[], int first, int last )
{
int tmp; // Temporary variable for SWAP.
if( first < last )
{
int pivot = a[first];
int i = first - 1;
int j = last + 1;
while( true )
{
do { j--; } while ( a[j] > pivot );
do { i++; } while ( a[i] < pivot );
if( i < j )
SWAP( a[i], a[j], tmp );
else
break;
}
Quicksort( a, first, j );
Quicksort( a, j + 1, last );
}
}
#undef SWAP
//===========================================================================
// CPU version.
//
// Given an input integer array, the function produces an output array
// which is a sorted version of the input array, but with duplicate
// elements removed. The output array is sorted in non-decreasing order.
// The function also produces the number of unique elements in the
// output array in the parameter (*numUniqueElems).
//
// When this function is called, sufficient memory storage must have
// already been allocated for the output array. The safest is to allocate
// as much memory as for the input array.
//===========================================================================
static void CPU_Unique( const int inputArray[], int numInputElems,
int outputArray[], int *numUniqueElems )
{
if ( numInputElems < 1 )
{
(*numUniqueElems) = 0;
return;
}
int *sortedArray = (int *) malloc( numInputElems * sizeof(int) );
memcpy( sortedArray, inputArray, numInputElems * sizeof(int) );
Quicksort( sortedArray, 0, numInputElems - 1 );
outputArray[0] = sortedArray[0];
int uniqueCount = 1;
for ( int i = 1; i < numInputElems; i++ )
if ( sortedArray[i] != sortedArray[i-1] )
outputArray[ uniqueCount++ ] = sortedArray[i];
(*numUniqueElems) = uniqueCount;
}
//===========================================================================
// Generates a set of random integers, each has value from elemMin to
// elemMax, and put them in the array intArray[].
//===========================================================================
static void GenerateRandomIntegers( int intArray[], int numElems, int elemMin, int elemMax )
{
for ( int i = 0; i < numElems; i++ )
{
int rand32 = rand() * (RAND_MAX + 1) + rand();
intArray[i] = rand32 % (elemMax - elemMin + 1) + elemMin;
}
}
//===========================================================================
// Return true iff all corresponding elements in the int
// arrays A and B are equal.
//===========================================================================
static bool IntArrayEqual( const int A[], const int B[], int numElems )
{
for ( int i = 0; i < numElems; i++ )
if ( A[i] != B[i] ) return false;
return true;
}
void WaitForEnterKeyBeforeExit( void )
{
fflush( stdin );
getchar();
}
//===========================================================================
// The main function
//===========================================================================
int main(int argc, char** argv)
{
atexit( WaitForEnterKeyBeforeExit );
// Set seed for rand().
srand( 927 );
// Use command-line specified CUDA device, otherwise use device with highest Gflops/s.
int devID = findCudaDevice( argc, (const char **)argv );
// Create a timer.
StopWatchInterface *timer = 0;
sdkCreateTimer( &timer );
//---------------------------------------------------------------------------
// Allocate host memory and generate test data.
//---------------------------------------------------------------------------
// Allocate host memory for input integer array.
int *inputArray = (int *) malloc( NUM_ELEMS * sizeof(int) );
// Allocate host memory for result arrays.
int *cpu_uniqueArray = (int *) malloc( NUM_ELEMS * sizeof(int) );
int *gpu_uniqueArray = (int *) malloc( NUM_ELEMS * sizeof(int) );
// Number of unique elements in input array computed by different methods.
int cpu_numUniqueElems = 0;
int gpu_numUniqueElems = 0;
// Fill the input array with random integers.
GenerateRandomIntegers( inputArray, NUM_ELEMS, ELEM_MIN, ELEM_MAX );
//---------------------------------------------------------------------------
// Print some program parameter values.
//---------------------------------------------------------------------------
printf( "NUM_ELEMS = %d\n", NUM_ELEMS );
printf( "BLOCK_SIZE = %d\n", BLOCK_SIZE );
printf( "NUM_BLOCKS = %d\n", NUM_BLOCKS );
printf( "ELEM_MIN = %d\n", ELEM_MIN );
printf( "ELEM_MAX = %d\n", ELEM_MAX );
printf( "\n\n" );
//---------------------------------------------------------------------------
// Perform computation on CPU.
//---------------------------------------------------------------------------
printf( "CPU COMPUTATION:\n" );
// Reset and start timer.
sdkResetTimer( &timer );
sdkStartTimer( &timer );
// Compute on CPU.
CPU_Unique( inputArray, NUM_ELEMS, cpu_uniqueArray, &cpu_numUniqueElems );
// Stop timer.
sdkStopTimer( &timer );
printf( "Processing time = %.3f ms\n", sdkGetTimerValue( &timer ) );
// Print some results.
printf( "Number of unique elements = %d\n", cpu_numUniqueElems );
printf( "\n\n" );
//---------------------------------------------------------------------------
// Perform computation on GPU.
//---------------------------------------------------------------------------
printf( "GPU COMPUTATION:\n" );
// Reset and start timer.
sdkResetTimer( &timer );
sdkStartTimer( &timer );
// Compute on GPU.
GPU_Unique( inputArray, NUM_ELEMS, gpu_uniqueArray, &gpu_numUniqueElems );
// Stop timer.
sdkStopTimer( &timer );
printf( "Processing time = %.3f ms\n", sdkGetTimerValue( &timer ) );
// Print some results.
printf( "Number of unique elements = %d\n", gpu_numUniqueElems );
printf( "\n" );
// Check result with reference result computed by CPU.
bool equal = ( gpu_numUniqueElems == cpu_numUniqueElems ) &&
IntArrayEqual( cpu_uniqueArray, gpu_uniqueArray, cpu_numUniqueElems );
printf( "Verify GPU result... %s\n", (equal)? "PASS" : "FAIL" );
printf( "\n\n" );
//---------------------------------------------------------------------------
// Clean up.
//---------------------------------------------------------------------------
// Destroy the timer.
sdkDeleteTimer( &timer );
// Free up memory.
free( inputArray );
free( cpu_uniqueArray );
free( gpu_uniqueArray );
cudaDeviceReset();
}
|
060daa970873beb9f8e7e84a4be29d25dac316bf.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include "common.h"
#include <stdio.h>
#include <hiprand/hiprand_kernel.h>
#define THREADS_PER_BLOCK 32
#define SEED 60
//Generate data
__global__ void mcrandom(double *udata, const int N, const int nb, hiprandState_t *states)
{
unsigned int i_glb = blockIdx.x * blockDim.x + threadIdx.x;
int n = N/nb;
//initialise the hiprand state for this thread
hiprand_init((SEED << 20) + i_glb, 0, 0, &states[i_glb]);
if (i_glb<n)
{
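// Each thread accumulates nb samples of cos(-log(u)) with u uniform on (0,1);
// the mean of these samples estimates the integral of cos(ln x) over (0,1], which is 1/2,
// and the reduction plus division by N below recovers that mean.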
for(int i=0; i<nb; i++)
{
double xran = hiprand_uniform_double (&states[i_glb]);
udata[i_glb] += cos(-log(xran));
}
}
}
//reduction kernel
__global__ void reductionOnGPU(double *udata,float *f)
{
__shared__ double u[THREADS_PER_BLOCK];
unsigned int i_glb = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int i_loc = threadIdx.x;
int ib = blockDim.x;
unsigned int i;
//load memory
u[i_loc] = udata[i_glb];
__syncthreads();
//reduction in shared memory
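// strided pass: with stride i, thread t adds u[2*i*t + i] into u[2*i*t],
// so after log2(blockDim.x) passes u[0] holds this block's partial sum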
for (i = 1; i<ib; i *=2)
{
int index = 2*i*i_loc;
//__syncthreads();
if (index < blockDim.x)
{
u[index] += u[i + index];
}
__syncthreads();
}
if(i_loc==0)
{
atomicAdd(f,u[0]);
}
}
__global__ void integralOnGPU(float *f, double *Int ,const int N)
{
//global mean
*Int = *f/N;
}
int main(int argc, char **argv)
{
// problem size
long int N = atol(argv[1]);
int T = THREADS_PER_BLOCK;
//random number generator
hiprandState_t *States;
// malloc host memory
double gpuRef;
//start timing
double iStart = seconds();
// malloc device global memory
float *d_f;
double *d_Int;
double *d_udata;
CHECK(hipMalloc((void **)&d_Int, sizeof(double)));
//invoke the kernel
int B = ((N + T -1)/T);
if(B > 65535) B = 65535;
int nb = ceil((N*1.0)/(B*T));
//states allocate memory
CHECK(hipMalloc( (void **)&States, (B*T)*sizeof(hiprandState_t)));
CHECK(hipMalloc((void **)&d_udata, (B*T)*sizeof(double)));
CHECK(hipMalloc((void **)&d_f, (B*T)*sizeof(float)));
//zero the device accumulators before the kernels add into them
CHECK(hipMemset(d_udata, 0, (B*T)*sizeof(double)));
CHECK(hipMemset(d_f, 0, (B*T)*sizeof(float)));
//double iStart = seconds();
hipLaunchKernelGGL(( mcrandom), dim3(B),dim3(T), 0, 0, d_udata, N, nb, States);
CHECK(hipDeviceSynchronize());
hipLaunchKernelGGL(( reductionOnGPU), dim3(B),dim3(T), 0, 0, d_udata,d_f);
CHECK(hipDeviceSynchronize());
hipLaunchKernelGGL(( integralOnGPU), dim3(1),dim3(1), 0, 0, d_f,d_Int ,N);
CHECK(hipDeviceSynchronize());
double iElaps_g = seconds() - iStart;
// check kernel error
CHECK(hipGetLastError());
//double iElaps_g = seconds() - iStart;
// copy kernel result back to host side
CHECK(hipMemcpy(&gpuRef, d_Int, sizeof(double), hipMemcpyDeviceToHost));
//error achieved
double error = fabs(0.5 - gpuRef);
printf("%ld,%f,%e,%f\n",N,gpuRef,error,iElaps_g);
//free device memory
CHECK(hipFree(States));
CHECK(hipFree(d_f));
CHECK(hipFree(d_Int));
CHECK(hipFree(d_udata));
// reset device
CHECK(hipDeviceReset());
return (0);
}
| 060daa970873beb9f8e7e84a4be29d25dac316bf.cu | #include <cuda_runtime.h>
#include <curand.h>
#include "common.h"
#include <stdio.h>
#include <curand_kernel.h>
#define THREADS_PER_BLOCK 32
#define SEED 60
//Generate data
__global__ void mcrandom(double *udata, const int N, const int nb, curandState *states)
{
unsigned int i_glb = blockIdx.x * blockDim.x + threadIdx.x;
int n = N/nb;
//initialise the curand state for this thread
curand_init((SEED << 20) + i_glb, 0, 0, &states[i_glb]);
if (i_glb<n)
{
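// Each thread accumulates nb samples of cos(-log(u)) with u uniform on (0,1);
// the mean of these samples estimates the integral of cos(ln x) over (0,1], which is 1/2,
// and the reduction plus division by N below recovers that mean.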
for(int i=0; i<nb; i++)
{
double xran = curand_uniform_double (&states[i_glb]);
udata[i_glb] += cos(-log(xran));
}
}
}
//reduction kernel
__global__ void reductionOnGPU(double *udata,float *f)
{
__shared__ double u[THREADS_PER_BLOCK];
unsigned int i_glb = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int i_loc = threadIdx.x;
int ib = blockDim.x;
unsigned int i;
//load memory
u[i_loc] = udata[i_glb];
__syncthreads();
//reduction in shared memory
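// strided pass: with stride i, thread t adds u[2*i*t + i] into u[2*i*t],
// so after log2(blockDim.x) passes u[0] holds this block's partial sum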
for (i = 1; i<ib; i *=2)
{
int index = 2*i*i_loc;
//__syncthreads();
if (index < blockDim.x)
{
u[index] += u[i + index];
}
__syncthreads();
}
if(i_loc==0)
{
atomicAdd(f,u[0]);
}
}
__global__ void integralOnGPU(float *f, double *Int ,const int N)
{
//global mean
*Int = *f/N;
}
int main(int argc, char **argv)
{
// problem size
long int N = atol(argv[1]);
int T = THREADS_PER_BLOCK;
//random number generator
curandState *States;
// malloc host memory
double gpuRef;
//start timing
double iStart = seconds();
// malloc device global memory
float *d_f;
double *d_Int;
double *d_udata;
CHECK(cudaMalloc((void **)&d_Int, sizeof(double)));
//invoke the kernel
int B = ((N + T -1)/T);
if(B > 65535) B = 65535;
int nb = ceil((N*1.0)/(B*T));
//states allocate memory
CHECK(cudaMalloc( (void **)&States, (B*T)*sizeof(curandState)));
CHECK(cudaMalloc((void **)&d_udata, (B*T)*sizeof(double)));
CHECK(cudaMalloc((void **)&d_f, (B*T)*sizeof(float)));
//zero the device accumulators before the kernels add into them
CHECK(cudaMemset(d_udata, 0, (B*T)*sizeof(double)));
CHECK(cudaMemset(d_f, 0, (B*T)*sizeof(float)));
//double iStart = seconds();
mcrandom<<<B,T>>>(d_udata, N, nb, States);
CHECK(cudaDeviceSynchronize());
reductionOnGPU<<<B,T>>>(d_udata,d_f);
CHECK(cudaDeviceSynchronize());
integralOnGPU<<<1,1>>>(d_f,d_Int ,N);
CHECK(cudaDeviceSynchronize());
double iElaps_g = seconds() - iStart;
// check kernel error
CHECK(cudaGetLastError());
//double iElaps_g = seconds() - iStart;
// copy kernel result back to host side
CHECK(cudaMemcpy(&gpuRef, d_Int, sizeof(double), cudaMemcpyDeviceToHost));
//error achieved
double error = fabs(0.5 - gpuRef);
printf("%ld,%f,%e,%f\n",N,gpuRef,error,iElaps_g);
//free device memory
CHECK(cudaFree(States));
CHECK(cudaFree(d_f));
CHECK(cudaFree(d_Int));
CHECK(cudaFree(d_udata));
// reset device
CHECK(cudaDeviceReset());
return (0);
}
|
a872722b3d9c9dbdfc7717d0d4aedd311b90a1c1.hip | // !!! This is a file automatically generated by hipify!!!
/**
* @file electric_force_cuda_kernel.cu
* @author Yibo Lin
* @date Aug 2018
*/
#include <float.h>
#include <math.h>
#include <stdio.h>
#include "hip/hip_runtime.h"
#include "utility/src/utils.cuh"
// local dependency
#include "electric_potential/src/density_function.h"
DREAMPLACE_BEGIN_NAMESPACE
/// define triangle_density_function
template <typename T>
inline __device__ DEFINE_TRIANGLE_DENSITY_FUNCTION(T);
template <typename T>
__global__ void __launch_bounds__(1024, 8) computeElectricForce(
int num_bins_x, int num_bins_y, const T *field_map_x_tensor,
const T *field_map_y_tensor, const T *x_tensor, const T *y_tensor,
const T *node_size_x_clamped_tensor, const T *node_size_y_clamped_tensor,
const T *offset_x_tensor, const T *offset_y_tensor, const T *ratio_tensor,
const T *bin_center_x_tensor, const T *bin_center_y_tensor, T xl, T yl,
T xh, T yh, const T half_bin_size_x, const T half_bin_size_y,
const T bin_size_x, const T bin_size_y, const T inv_bin_size_x,
const T inv_bin_size_y, int num_nodes, T *grad_x_tensor, T *grad_y_tensor,
const int *sorted_node_map ///< can be NULL if not sorted
) {
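// The launcher below uses blockDim = (2, 2, 64): threadIdx.z picks one of 64 nodes per block,
// and the 2x2 (threadIdx.x, threadIdx.y) threads cooperate over that node's bins,
// accumulating into the per-node shared-memory slots s_x/s_y with atomics.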
int index = blockIdx.x * blockDim.z + threadIdx.z;
if (index < num_nodes) {
int i = (sorted_node_map) ? sorted_node_map[index] : index;
// use stretched node size
T node_size_x = node_size_x_clamped_tensor[i];
T node_size_y = node_size_y_clamped_tensor[i];
T node_x = x_tensor[i] + offset_x_tensor[i];
T node_y = y_tensor[i] + offset_y_tensor[i];
T ratio = ratio_tensor[i];
// Yibo: looks very weird implementation, but this is how RePlAce implements
// it Zixuan and Jiaqi: use the common practice of floor
int bin_index_xl = int((node_x - xl) * inv_bin_size_x);
int bin_index_xh =
int(((node_x + node_size_x - xl) * inv_bin_size_x)) + 1; // exclusive
bin_index_xl = DREAMPLACE_STD_NAMESPACE::max(bin_index_xl, 0);
bin_index_xh = DREAMPLACE_STD_NAMESPACE::min(bin_index_xh, num_bins_x);
int bin_index_yl = int((node_y - yl) * inv_bin_size_y);
int bin_index_yh =
int(((node_y + node_size_y - yl) * inv_bin_size_y)) + 1; // exclusive
bin_index_yl = DREAMPLACE_STD_NAMESPACE::max(bin_index_yl, 0);
bin_index_yh = DREAMPLACE_STD_NAMESPACE::min(bin_index_yh, num_bins_y);
// blockDim.x * blockDim.y threads will be used to update one node
// shared memory is used to privatize the atomic memory access to thread
// block
extern __shared__ unsigned char s_xy[];
T *s_x = (T *)s_xy;
T *s_y = s_x + blockDim.z;
if (threadIdx.x == 0 && threadIdx.y == 0) {
s_x[threadIdx.z] = s_y[threadIdx.z] = 0;
}
__syncthreads();
T tmp_x, tmp_y;
tmp_x = 0;
tmp_y = 0;
// update density potential map
for (int k = bin_index_xl + threadIdx.y; k < bin_index_xh;
k += blockDim.y) {
T px = triangle_density_function(node_x, node_size_x, xl, k, bin_size_x);
for (int h = bin_index_yl + threadIdx.x; h < bin_index_yh;
h += blockDim.x) {
T py =
triangle_density_function(node_y, node_size_y, yl, h, bin_size_y);
T area = px * py;
int idx = k * num_bins_y + h;
tmp_x += area * field_map_x_tensor[idx];
tmp_y += area * field_map_y_tensor[idx];
}
}
atomicAdd(&s_x[threadIdx.z], tmp_x * ratio);
atomicAdd(&s_y[threadIdx.z], tmp_y * ratio);
__syncthreads();
if (threadIdx.x == 0 && threadIdx.y == 0) {
grad_x_tensor[i] = s_x[threadIdx.z];
grad_y_tensor[i] = s_y[threadIdx.z];
}
}
}
/// @brief An unrolled way to compute the force.
/// Currently it is not as efficient as computeElectricForce,
/// it has the potential to be better.
/// It is not used for now.
template <typename T>
__global__ void computeElectricForceUnroll(
int num_bins_x, int num_bins_y, const T *field_map_x_tensor,
const T *field_map_y_tensor, const T *x_tensor, const T *y_tensor,
const T *node_size_x_clamped_tensor, const T *node_size_y_clamped_tensor,
const T *offset_x_tensor, const T *offset_y_tensor, const T *ratio_tensor,
const T *bin_center_x_tensor, const T *bin_center_y_tensor, T xl, T yl,
T xh, T yh, const T half_bin_size_x, const T half_bin_size_y,
const T bin_size_x, const T bin_size_y, const T inv_bin_size_x,
const T inv_bin_size_y, int num_nodes, T *grad_x_tensor, T *grad_y_tensor,
const int *sorted_node_map ///< can be NULL if not sorted
) {
int index = blockIdx.x * blockDim.y + threadIdx.y;
if (index < num_nodes) {
int i = (sorted_node_map) ? sorted_node_map[index] : index;
// stretch node size to bin size
T node_size_x = node_size_x_clamped_tensor[i];
T node_size_y = node_size_y_clamped_tensor[i];
T node_x = x_tensor[i] + offset_x_tensor[i];
T node_y = y_tensor[i] + offset_y_tensor[i];
T ratio = ratio_tensor[i];
// Yibo: looks very weird implementation, but this is how RePlAce implements
// it Zixuan and Jiaqi: use the common practice of floor
int bin_index_xl = int((node_x - xl) * inv_bin_size_x);
int bin_index_xh =
int(((node_x + node_size_x - xl) * inv_bin_size_x)); // inclusive
bin_index_xl = DREAMPLACE_STD_NAMESPACE::max(bin_index_xl, 0);
bin_index_xh = DREAMPLACE_STD_NAMESPACE::min(bin_index_xh, num_bins_x - 1);
int bin_index_yl = int((node_y - yl) * inv_bin_size_y);
int bin_index_yh =
int(((node_y + node_size_y - yl) * inv_bin_size_y)); // inclusive
bin_index_yl = DREAMPLACE_STD_NAMESPACE::max(bin_index_yl, 0);
bin_index_yh = DREAMPLACE_STD_NAMESPACE::min(bin_index_yh, num_bins_y - 1);
int k, h;
int cond =
((bin_index_xl == bin_index_xh) << 1) | (bin_index_yl == bin_index_yh);
switch (cond) {
case 0: {
// blockDim.x threads will be used to update one node
// shared memory is used to privatize the atomic memory access to thread
// block
extern __shared__ unsigned char shared_memory[];
T *s_x = (T *)shared_memory;
T *s_y = s_x + blockDim.y;
if (threadIdx.x == 0) {
s_x[threadIdx.y] = s_y[threadIdx.y] = 0;
}
__syncthreads();
T tmp_x = 0;
T tmp_y = 0;
T px_c = bin_size_x;
T py_l = yl + bin_index_yl * bin_size_y + bin_size_y - node_y;
T py_c = bin_size_y;
T py_h = node_y + node_size_y - (bin_index_yh * bin_size_y + yl);
T area_xc_yl = px_c * py_l;
T area_xc_yc = px_c * py_c;
T area_xc_yh = px_c * py_h;
k = bin_index_xl;
if (threadIdx.x == 0) {
T px_l = xl + bin_index_xl * bin_size_x + bin_size_x - node_x;
T area_xl_yl = px_l * py_l;
T area_xl_yc = px_l * py_c;
T area_xl_yh = px_l * py_h;
h = bin_index_yl;
tmp_x = area_xl_yl * field_map_x_tensor[k * num_bins_y + h];
tmp_y = area_xl_yl * field_map_y_tensor[k * num_bins_y + h];
for (++h; h < bin_index_yh; ++h) {
tmp_x += area_xl_yc * field_map_x_tensor[k * num_bins_y + h];
tmp_y += area_xl_yc * field_map_y_tensor[k * num_bins_y + h];
}
tmp_x += area_xl_yh * field_map_x_tensor[k * num_bins_y + h];
tmp_y += area_xl_yh * field_map_y_tensor[k * num_bins_y + h];
k += blockDim.x;
}
for (k += threadIdx.x; k < bin_index_xh; k += blockDim.x) {
h = bin_index_yl;
tmp_x += area_xc_yl * field_map_x_tensor[k * num_bins_y + h];
tmp_y += area_xc_yl * field_map_y_tensor[k * num_bins_y + h];
for (++h; h < bin_index_yh; ++h) {
tmp_x += area_xc_yc * field_map_x_tensor[k * num_bins_y + h];
tmp_y += area_xc_yc * field_map_y_tensor[k * num_bins_y + h];
}
tmp_x += area_xc_yh * field_map_x_tensor[k * num_bins_y + h];
tmp_y += area_xc_yh * field_map_y_tensor[k * num_bins_y + h];
}
if (k == bin_index_xh) {
T px_h = node_x + node_size_x - (bin_index_xh * bin_size_x + xl);
T area_xh_yl = px_h * py_l;
T area_xh_yc = px_h * py_c;
T area_xh_yh = px_h * py_h;
h = bin_index_yl;
tmp_x += area_xh_yl * field_map_x_tensor[k * num_bins_y + h];
tmp_y += area_xh_yl * field_map_y_tensor[k * num_bins_y + h];
for (++h; h < bin_index_yh; ++h) {
tmp_x += area_xh_yc * field_map_x_tensor[k * num_bins_y + h];
tmp_y += area_xh_yc * field_map_y_tensor[k * num_bins_y + h];
}
tmp_x += area_xh_yh * field_map_x_tensor[k * num_bins_y + h];
tmp_y += area_xh_yh * field_map_y_tensor[k * num_bins_y + h];
}
atomicAdd(&s_x[threadIdx.y], tmp_x * ratio);
atomicAdd(&s_y[threadIdx.y], tmp_y * ratio);
__syncthreads();
if (threadIdx.x == 0) {
grad_x_tensor[i] = s_x[threadIdx.y];
grad_y_tensor[i] = s_y[threadIdx.y];
}
return;
}
case 1: {
extern __shared__ unsigned char shared_memory[];
T *s_x = (T *)shared_memory;
T *s_y = s_x + blockDim.y;
if (threadIdx.x == 0) {
s_x[threadIdx.y] = s_y[threadIdx.y] = 0;
}
__syncthreads();
T tmp_x = 0;
T tmp_y = 0;
T py = yl + bin_index_yl * bin_size_y + bin_size_y - node_y;
h = bin_index_yl;
k = bin_index_xl;
if (threadIdx.x == 0) {
T px_l = xl + bin_index_xl * bin_size_x + bin_size_x - node_x;
T area_xl = px_l * py;
tmp_x = area_xl * field_map_x_tensor[k * num_bins_y + h];
tmp_y = area_xl * field_map_y_tensor[k * num_bins_y + h];
k += blockDim.x;
}
T px_c = bin_size_x;
T area_xc = px_c * py;
for (k += threadIdx.x; k < bin_index_xh; k += blockDim.x) {
tmp_x += area_xc * field_map_x_tensor[k * num_bins_y + h];
tmp_y += area_xc * field_map_y_tensor[k * num_bins_y + h];
}
if (k == bin_index_xh) {
T px_h = node_x + node_size_x - (bin_index_xh * bin_size_x + xl);
T area_xh = px_h * py;
tmp_x += area_xh * field_map_x_tensor[k * num_bins_y + h];
tmp_y += area_xh * field_map_y_tensor[k * num_bins_y + h];
}
atomicAdd(&s_x[threadIdx.y], tmp_x * ratio);
atomicAdd(&s_y[threadIdx.y], tmp_y * ratio);
__syncthreads();
if (threadIdx.x == 0) {
grad_x_tensor[i] = s_x[threadIdx.y];
grad_y_tensor[i] = s_y[threadIdx.y];
}
return;
}
case 2: {
extern __shared__ unsigned char shared_memory[];
T *s_x = (T *)shared_memory;
T *s_y = s_x + blockDim.y;
if (threadIdx.x == 0) {
s_x[threadIdx.y] = s_y[threadIdx.y] = 0;
}
__syncthreads();
T tmp_x = 0;
T tmp_y = 0;
T px = xl + bin_index_xl * bin_size_x + bin_size_x - node_x;
k = bin_index_xl;
h = bin_index_yl;
if (threadIdx.x == 0) {
T py_l = yl + bin_index_yl * bin_size_y + bin_size_y - node_y;
T area_yl = px * py_l;
tmp_x = area_yl * field_map_x_tensor[k * num_bins_y + h];
tmp_y = area_yl * field_map_y_tensor[k * num_bins_y + h];
h += blockDim.x;
}
T py_c = bin_size_y;
T area_yc = px * py_c;
for (h += threadIdx.x; h < bin_index_yh; h += blockDim.x) {
tmp_x += area_yc * field_map_x_tensor[k * num_bins_y + h];
tmp_y += area_yc * field_map_y_tensor[k * num_bins_y + h];
}
if (h == bin_index_yh) {
T py_h = node_y + node_size_y - (bin_index_yh * bin_size_y + yl);
T area_yh = px * py_h;
tmp_x += area_yh * field_map_x_tensor[k * num_bins_y + h];
tmp_y += area_yh * field_map_y_tensor[k * num_bins_y + h];
}
atomicAdd(&s_x[threadIdx.y], tmp_x * ratio);
atomicAdd(&s_y[threadIdx.y], tmp_y * ratio);
__syncthreads();
if (threadIdx.x == 0) {
grad_x_tensor[i] = s_x[threadIdx.y];
grad_y_tensor[i] = s_y[threadIdx.y];
}
return;
}
case 3: {
if (threadIdx.x == 0) {
T px = xl + bin_index_xl * bin_size_x + bin_size_x - node_x;
T py = yl + bin_index_yl * bin_size_y + bin_size_y - node_y;
T area_by_ratio = px * py * ratio;
k = bin_index_xl;
h = bin_index_yl;
grad_x_tensor[i] =
area_by_ratio * field_map_x_tensor[k * num_bins_y + h];
grad_y_tensor[i] =
area_by_ratio * field_map_y_tensor[k * num_bins_y + h];
}
return;
}
default:
assert(0);
}
}
}
template <typename T>
__global__ void computeElectricForceSimpleLikeCPU(
int num_bins_x, int num_bins_y, int num_impacted_bins_x,
int num_impacted_bins_y, const T *field_map_x_tensor,
const T *field_map_y_tensor, const T *x_tensor, const T *y_tensor,
const T *node_size_x_clamped_tensor, const T *node_size_y_clamped_tensor,
const T *offset_x_tensor, const T *offset_y_tensor, const T *ratio_tensor,
const T *bin_center_x_tensor, const T *bin_center_y_tensor, T xl, T yl,
T xh, T yh, T bin_size_x, T bin_size_y, int num_nodes, T *grad_x_tensor,
T *grad_y_tensor) {
// density_map_tensor should be initialized outside
T inv_bin_size_x = 1.0 / bin_size_x;
T inv_bin_size_y = 1.0 / bin_size_y;
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < num_nodes) {
// use stretched node size
T node_size_x = node_size_x_clamped_tensor[i];
T node_size_y = node_size_y_clamped_tensor[i];
T node_x = x_tensor[i] + offset_x_tensor[i];
T node_y = y_tensor[i] + offset_y_tensor[i];
T ratio = ratio_tensor[i];
// Yibo: looks very weird implementation, but this is how RePlAce implements
// it the common practice should be floor Zixuan and Jiaqi: use the common
// practice of floor
int bin_index_xl = int((node_x - xl) * inv_bin_size_x);
int bin_index_xh =
int(((node_x + node_size_x - xl) * inv_bin_size_x)) + 1; // exclusive
bin_index_xl = DREAMPLACE_STD_NAMESPACE::max(bin_index_xl, 0);
bin_index_xh = DREAMPLACE_STD_NAMESPACE::min(bin_index_xh, num_bins_x);
// int bin_index_xh = bin_index_xl+num_impacted_bins_x;
// Yibo: looks very weird implementation, but this is how RePlAce implements
// it the common practice should be floor Zixuan and Jiaqi: use the common
// practice of floor
int bin_index_yl = int((node_y - yl) * inv_bin_size_y);
int bin_index_yh =
int(((node_y + node_size_y - yl) * inv_bin_size_y)) + 1; // exclusive
bin_index_yl = DREAMPLACE_STD_NAMESPACE::max(bin_index_yl, 0);
bin_index_yh = DREAMPLACE_STD_NAMESPACE::min(bin_index_yh, num_bins_y);
// int bin_index_yh = bin_index_yl+num_impacted_bins_y;
T &gx = grad_x_tensor[i];
T &gy = grad_y_tensor[i];
gx = 0;
gy = 0;
// update density potential map
for (int k = bin_index_xl; k < bin_index_xh; ++k) {
T px = triangle_density_function(node_x, node_size_x, xl, k, bin_size_x);
for (int h = bin_index_yl; h < bin_index_yh; ++h) {
T py =
triangle_density_function(node_y, node_size_y, yl, h, bin_size_y);
T area = px * py;
int idx = k * num_bins_y + h;
gx += area * field_map_x_tensor[idx];
gy += area * field_map_y_tensor[idx];
}
}
gx *= ratio;
gy *= ratio;
}
}
template <typename T>
int computeElectricForceCudaLauncher(
int num_bins_x, int num_bins_y, int num_impacted_bins_x,
int num_impacted_bins_y, const T *field_map_x_tensor,
const T *field_map_y_tensor, const T *x_tensor, const T *y_tensor,
const T *node_size_x_clamped_tensor, const T *node_size_y_clamped_tensor,
const T *offset_x_tensor, const T *offset_y_tensor, const T *ratio_tensor,
const T *bin_center_x_tensor, const T *bin_center_y_tensor, T xl, T yl,
T xh, T yh, T bin_size_x, T bin_size_y, int num_nodes, T *grad_x_tensor,
T *grad_y_tensor, const int *sorted_node_map) {
int thread_count = 64;
dim3 blockSize(2, 2, thread_count);
size_t shared_mem_size = sizeof(T) * thread_count * 2;
int block_count_nodes = (num_nodes + thread_count - 1) / thread_count;
hipLaunchKernelGGL(( computeElectricForce), dim3(block_count_nodes), dim3(blockSize), shared_mem_size, 0,
num_bins_x, num_bins_y, field_map_x_tensor, field_map_y_tensor, x_tensor,
y_tensor, node_size_x_clamped_tensor, node_size_y_clamped_tensor,
offset_x_tensor, offset_y_tensor, ratio_tensor, bin_center_x_tensor,
bin_center_y_tensor, xl, yl, xh, yh, bin_size_x / 2, bin_size_y / 2,
bin_size_x, bin_size_y, 1 / bin_size_x, 1 / bin_size_y, num_nodes,
grad_x_tensor, grad_y_tensor, sorted_node_map);
// computeElectricForceSimpleLikeCPU<<<block_count_nodes, thread_count>>>(
// num_bins_x, num_bins_y,
// num_impacted_bins_x, num_impacted_bins_y,
// field_map_x_tensor, field_map_y_tensor,
// x_tensor, y_tensor,
// node_size_x_clamped_tensor, node_size_y_clamped_tensor,
// offset_x_tensor, offset_y_tensor,
// ratio_tensor,
// bin_center_x_tensor, bin_center_y_tensor,
// xl, yl, xh, yh,
// bin_size_x, bin_size_y,
// num_nodes,
// grad_x_tensor, grad_y_tensor
// );
return 0;
}
#define REGISTER_KERNEL_LAUNCHER(T) \
template int computeElectricForceCudaLauncher<T>( \
int num_bins_x, int num_bins_y, int num_impacted_bins_x, \
int num_impacted_bins_y, const T *field_map_x_tensor, \
const T *field_map_y_tensor, const T *x_tensor, const T *y_tensor, \
const T *node_size_x_clamped_tensor, \
const T *node_size_y_clamped_tensor, const T *offset_x_tensor, \
const T *offset_y_tensor, const T *ratio_tensor, \
const T *bin_center_x_tensor, const T *bin_center_y_tensor, T xl, T yl, \
T xh, T yh, T bin_size_x, T bin_size_y, int num_nodes, T *grad_x_tensor, \
T *grad_y_tensor, const int *sorted_node_map);
REGISTER_KERNEL_LAUNCHER(float);
REGISTER_KERNEL_LAUNCHER(double);
DREAMPLACE_END_NAMESPACE
| a872722b3d9c9dbdfc7717d0d4aedd311b90a1c1.cu | /**
* @file electric_force_cuda_kernel.cu
* @author Yibo Lin
* @date Aug 2018
*/
#include <float.h>
#include <math.h>
#include <stdio.h>
#include "cuda_runtime.h"
#include "utility/src/utils.cuh"
// local dependency
#include "electric_potential/src/density_function.h"
DREAMPLACE_BEGIN_NAMESPACE
/// define triangle_density_function
template <typename T>
inline __device__ DEFINE_TRIANGLE_DENSITY_FUNCTION(T);
template <typename T>
__global__ void __launch_bounds__(1024, 8) computeElectricForce(
int num_bins_x, int num_bins_y, const T *field_map_x_tensor,
const T *field_map_y_tensor, const T *x_tensor, const T *y_tensor,
const T *node_size_x_clamped_tensor, const T *node_size_y_clamped_tensor,
const T *offset_x_tensor, const T *offset_y_tensor, const T *ratio_tensor,
const T *bin_center_x_tensor, const T *bin_center_y_tensor, T xl, T yl,
T xh, T yh, const T half_bin_size_x, const T half_bin_size_y,
const T bin_size_x, const T bin_size_y, const T inv_bin_size_x,
const T inv_bin_size_y, int num_nodes, T *grad_x_tensor, T *grad_y_tensor,
const int *sorted_node_map ///< can be NULL if not sorted
) {
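// The launcher below uses blockDim = (2, 2, 64): threadIdx.z picks one of 64 nodes per block,
// and the 2x2 (threadIdx.x, threadIdx.y) threads cooperate over that node's bins,
// accumulating into the per-node shared-memory slots s_x/s_y with atomics.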
int index = blockIdx.x * blockDim.z + threadIdx.z;
if (index < num_nodes) {
int i = (sorted_node_map) ? sorted_node_map[index] : index;
// use stretched node size
T node_size_x = node_size_x_clamped_tensor[i];
T node_size_y = node_size_y_clamped_tensor[i];
T node_x = x_tensor[i] + offset_x_tensor[i];
T node_y = y_tensor[i] + offset_y_tensor[i];
T ratio = ratio_tensor[i];
// Yibo: looks very weird implementation, but this is how RePlAce implements
// it Zixuan and Jiaqi: use the common practice of floor
int bin_index_xl = int((node_x - xl) * inv_bin_size_x);
int bin_index_xh =
int(((node_x + node_size_x - xl) * inv_bin_size_x)) + 1; // exclusive
bin_index_xl = DREAMPLACE_STD_NAMESPACE::max(bin_index_xl, 0);
bin_index_xh = DREAMPLACE_STD_NAMESPACE::min(bin_index_xh, num_bins_x);
int bin_index_yl = int((node_y - yl) * inv_bin_size_y);
int bin_index_yh =
int(((node_y + node_size_y - yl) * inv_bin_size_y)) + 1; // exclusive
bin_index_yl = DREAMPLACE_STD_NAMESPACE::max(bin_index_yl, 0);
bin_index_yh = DREAMPLACE_STD_NAMESPACE::min(bin_index_yh, num_bins_y);
// blockDim.x * blockDim.y threads will be used to update one node
// shared memory is used to privatize the atomic memory access to thread
// block
extern __shared__ unsigned char s_xy[];
T *s_x = (T *)s_xy;
T *s_y = s_x + blockDim.z;
if (threadIdx.x == 0 && threadIdx.y == 0) {
s_x[threadIdx.z] = s_y[threadIdx.z] = 0;
}
__syncthreads();
T tmp_x, tmp_y;
tmp_x = 0;
tmp_y = 0;
// update density potential map
for (int k = bin_index_xl + threadIdx.y; k < bin_index_xh;
k += blockDim.y) {
T px = triangle_density_function(node_x, node_size_x, xl, k, bin_size_x);
for (int h = bin_index_yl + threadIdx.x; h < bin_index_yh;
h += blockDim.x) {
T py =
triangle_density_function(node_y, node_size_y, yl, h, bin_size_y);
T area = px * py;
int idx = k * num_bins_y + h;
tmp_x += area * field_map_x_tensor[idx];
tmp_y += area * field_map_y_tensor[idx];
}
}
atomicAdd(&s_x[threadIdx.z], tmp_x * ratio);
atomicAdd(&s_y[threadIdx.z], tmp_y * ratio);
__syncthreads();
if (threadIdx.x == 0 && threadIdx.y == 0) {
grad_x_tensor[i] = s_x[threadIdx.z];
grad_y_tensor[i] = s_y[threadIdx.z];
}
}
}
/// @brief An unrolled way to compute the force.
/// Currently it is not as efficient as computeElectricForce,
/// it has the potential to be better.
/// It is not used for now.
template <typename T>
__global__ void computeElectricForceUnroll(
int num_bins_x, int num_bins_y, const T *field_map_x_tensor,
const T *field_map_y_tensor, const T *x_tensor, const T *y_tensor,
const T *node_size_x_clamped_tensor, const T *node_size_y_clamped_tensor,
const T *offset_x_tensor, const T *offset_y_tensor, const T *ratio_tensor,
const T *bin_center_x_tensor, const T *bin_center_y_tensor, T xl, T yl,
T xh, T yh, const T half_bin_size_x, const T half_bin_size_y,
const T bin_size_x, const T bin_size_y, const T inv_bin_size_x,
const T inv_bin_size_y, int num_nodes, T *grad_x_tensor, T *grad_y_tensor,
const int *sorted_node_map ///< can be NULL if not sorted
) {
int index = blockIdx.x * blockDim.y + threadIdx.y;
if (index < num_nodes) {
int i = (sorted_node_map) ? sorted_node_map[index] : index;
// stretch node size to bin size
T node_size_x = node_size_x_clamped_tensor[i];
T node_size_y = node_size_y_clamped_tensor[i];
T node_x = x_tensor[i] + offset_x_tensor[i];
T node_y = y_tensor[i] + offset_y_tensor[i];
T ratio = ratio_tensor[i];
// Yibo: looks very weird implementation, but this is how RePlAce implements
// it Zixuan and Jiaqi: use the common practice of floor
int bin_index_xl = int((node_x - xl) * inv_bin_size_x);
int bin_index_xh =
int(((node_x + node_size_x - xl) * inv_bin_size_x)); // inclusive
bin_index_xl = DREAMPLACE_STD_NAMESPACE::max(bin_index_xl, 0);
bin_index_xh = DREAMPLACE_STD_NAMESPACE::min(bin_index_xh, num_bins_x - 1);
int bin_index_yl = int((node_y - yl) * inv_bin_size_y);
int bin_index_yh =
int(((node_y + node_size_y - yl) * inv_bin_size_y)); // inclusive
bin_index_yl = DREAMPLACE_STD_NAMESPACE::max(bin_index_yl, 0);
bin_index_yh = DREAMPLACE_STD_NAMESPACE::min(bin_index_yh, num_bins_y - 1);
int k, h;
int cond =
((bin_index_xl == bin_index_xh) << 1) | (bin_index_yl == bin_index_yh);
switch (cond) {
case 0: {
// blockDim.x threads will be used to update one node
// shared memory is used to privatize the atomic memory access to thread
// block
extern __shared__ unsigned char shared_memory[];
T *s_x = (T *)shared_memory;
T *s_y = s_x + blockDim.y;
if (threadIdx.x == 0) {
s_x[threadIdx.y] = s_y[threadIdx.y] = 0;
}
__syncthreads();
T tmp_x = 0;
T tmp_y = 0;
T px_c = bin_size_x;
T py_l = yl + bin_index_yl * bin_size_y + bin_size_y - node_y;
T py_c = bin_size_y;
T py_h = node_y + node_size_y - (bin_index_yh * bin_size_y + yl);
T area_xc_yl = px_c * py_l;
T area_xc_yc = px_c * py_c;
T area_xc_yh = px_c * py_h;
k = bin_index_xl;
if (threadIdx.x == 0) {
T px_l = xl + bin_index_xl * bin_size_x + bin_size_x - node_x;
T area_xl_yl = px_l * py_l;
T area_xl_yc = px_l * py_c;
T area_xl_yh = px_l * py_h;
h = bin_index_yl;
tmp_x = area_xl_yl * field_map_x_tensor[k * num_bins_y + h];
tmp_y = area_xl_yl * field_map_y_tensor[k * num_bins_y + h];
for (++h; h < bin_index_yh; ++h) {
tmp_x += area_xl_yc * field_map_x_tensor[k * num_bins_y + h];
tmp_y += area_xl_yc * field_map_y_tensor[k * num_bins_y + h];
}
tmp_x += area_xl_yh * field_map_x_tensor[k * num_bins_y + h];
tmp_y += area_xl_yh * field_map_y_tensor[k * num_bins_y + h];
k += blockDim.x;
}
for (k += threadIdx.x; k < bin_index_xh; k += blockDim.x) {
h = bin_index_yl;
tmp_x += area_xc_yl * field_map_x_tensor[k * num_bins_y + h];
tmp_y += area_xc_yl * field_map_y_tensor[k * num_bins_y + h];
for (++h; h < bin_index_yh; ++h) {
tmp_x += area_xc_yc * field_map_x_tensor[k * num_bins_y + h];
tmp_y += area_xc_yc * field_map_y_tensor[k * num_bins_y + h];
}
tmp_x += area_xc_yh * field_map_x_tensor[k * num_bins_y + h];
tmp_y += area_xc_yh * field_map_y_tensor[k * num_bins_y + h];
}
if (k == bin_index_xh) {
T px_h = node_x + node_size_x - (bin_index_xh * bin_size_x + xl);
T area_xh_yl = px_h * py_l;
T area_xh_yc = px_h * py_c;
T area_xh_yh = px_h * py_h;
h = bin_index_yl;
tmp_x += area_xh_yl * field_map_x_tensor[k * num_bins_y + h];
tmp_y += area_xh_yl * field_map_y_tensor[k * num_bins_y + h];
for (++h; h < bin_index_yh; ++h) {
tmp_x += area_xh_yc * field_map_x_tensor[k * num_bins_y + h];
tmp_y += area_xh_yc * field_map_y_tensor[k * num_bins_y + h];
}
tmp_x += area_xh_yh * field_map_x_tensor[k * num_bins_y + h];
tmp_y += area_xh_yh * field_map_y_tensor[k * num_bins_y + h];
}
atomicAdd(&s_x[threadIdx.y], tmp_x * ratio);
atomicAdd(&s_y[threadIdx.y], tmp_y * ratio);
__syncthreads();
if (threadIdx.x == 0) {
grad_x_tensor[i] = s_x[threadIdx.y];
grad_y_tensor[i] = s_y[threadIdx.y];
}
return;
}
case 1: {
extern __shared__ unsigned char shared_memory[];
T *s_x = (T *)shared_memory;
T *s_y = s_x + blockDim.y;
if (threadIdx.x == 0) {
s_x[threadIdx.y] = s_y[threadIdx.y] = 0;
}
__syncthreads();
T tmp_x = 0;
T tmp_y = 0;
T py = yl + bin_index_yl * bin_size_y + bin_size_y - node_y;
h = bin_index_yl;
k = bin_index_xl;
if (threadIdx.x == 0) {
T px_l = xl + bin_index_xl * bin_size_x + bin_size_x - node_x;
T area_xl = px_l * py;
tmp_x = area_xl * field_map_x_tensor[k * num_bins_y + h];
tmp_y = area_xl * field_map_y_tensor[k * num_bins_y + h];
k += blockDim.x;
}
T px_c = bin_size_x;
T area_xc = px_c * py;
for (k += threadIdx.x; k < bin_index_xh; k += blockDim.x) {
tmp_x += area_xc * field_map_x_tensor[k * num_bins_y + h];
tmp_y += area_xc * field_map_y_tensor[k * num_bins_y + h];
}
if (k == bin_index_xh) {
T px_h = node_x + node_size_x - (bin_index_xh * bin_size_x + xl);
T area_xh = px_h * py;
tmp_x += area_xh * field_map_x_tensor[k * num_bins_y + h];
tmp_y += area_xh * field_map_y_tensor[k * num_bins_y + h];
}
atomicAdd(&s_x[threadIdx.y], tmp_x * ratio);
atomicAdd(&s_y[threadIdx.y], tmp_y * ratio);
__syncthreads();
if (threadIdx.x == 0) {
grad_x_tensor[i] = s_x[threadIdx.y];
grad_y_tensor[i] = s_y[threadIdx.y];
}
return;
}
case 2: {
extern __shared__ unsigned char shared_memory[];
T *s_x = (T *)shared_memory;
T *s_y = s_x + blockDim.y;
if (threadIdx.x == 0) {
s_x[threadIdx.y] = s_y[threadIdx.y] = 0;
}
__syncthreads();
T tmp_x = 0;
T tmp_y = 0;
T px = xl + bin_index_xl * bin_size_x + bin_size_x - node_x;
k = bin_index_xl;
h = bin_index_yl;
if (threadIdx.x == 0) {
T py_l = yl + bin_index_yl * bin_size_y + bin_size_y - node_y;
T area_yl = px * py_l;
tmp_x = area_yl * field_map_x_tensor[k * num_bins_y + h];
tmp_y = area_yl * field_map_y_tensor[k * num_bins_y + h];
h += blockDim.x;
}
T py_c = bin_size_y;
T area_yc = px * py_c;
for (h += threadIdx.x; h < bin_index_yh; h += blockDim.x) {
tmp_x += area_yc * field_map_x_tensor[k * num_bins_y + h];
tmp_y += area_yc * field_map_y_tensor[k * num_bins_y + h];
}
if (h == bin_index_yh) {
T py_h = node_y + node_size_y - (bin_index_yh * bin_size_y + yl);
T area_yh = px * py_h;
tmp_x += area_yh * field_map_x_tensor[k * num_bins_y + h];
tmp_y += area_yh * field_map_y_tensor[k * num_bins_y + h];
}
atomicAdd(&s_x[threadIdx.y], tmp_x * ratio);
atomicAdd(&s_y[threadIdx.y], tmp_y * ratio);
__syncthreads();
if (threadIdx.x == 0) {
grad_x_tensor[i] = s_x[threadIdx.y];
grad_y_tensor[i] = s_y[threadIdx.y];
}
return;
}
case 3: {
if (threadIdx.x == 0) {
T px = xl + bin_index_xl * bin_size_x + bin_size_x - node_x;
T py = yl + bin_index_yl * bin_size_y + bin_size_y - node_y;
T area_by_ratio = px * py * ratio;
k = bin_index_xl;
h = bin_index_yl;
grad_x_tensor[i] =
area_by_ratio * field_map_x_tensor[k * num_bins_y + h];
grad_y_tensor[i] =
area_by_ratio * field_map_y_tensor[k * num_bins_y + h];
}
return;
}
default:
assert(0);
}
}
}
template <typename T>
__global__ void computeElectricForceSimpleLikeCPU(
int num_bins_x, int num_bins_y, int num_impacted_bins_x,
int num_impacted_bins_y, const T *field_map_x_tensor,
const T *field_map_y_tensor, const T *x_tensor, const T *y_tensor,
const T *node_size_x_clamped_tensor, const T *node_size_y_clamped_tensor,
const T *offset_x_tensor, const T *offset_y_tensor, const T *ratio_tensor,
const T *bin_center_x_tensor, const T *bin_center_y_tensor, T xl, T yl,
T xh, T yh, T bin_size_x, T bin_size_y, int num_nodes, T *grad_x_tensor,
T *grad_y_tensor) {
// density_map_tensor should be initialized outside
T inv_bin_size_x = 1.0 / bin_size_x;
T inv_bin_size_y = 1.0 / bin_size_y;
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < num_nodes) {
// use stretched node size
T node_size_x = node_size_x_clamped_tensor[i];
T node_size_y = node_size_y_clamped_tensor[i];
T node_x = x_tensor[i] + offset_x_tensor[i];
T node_y = y_tensor[i] + offset_y_tensor[i];
T ratio = ratio_tensor[i];
// Yibo: looks very weird implementation, but this is how RePlAce implements
// it the common practice should be floor Zixuan and Jiaqi: use the common
// practice of floor
int bin_index_xl = int((node_x - xl) * inv_bin_size_x);
int bin_index_xh =
int(((node_x + node_size_x - xl) * inv_bin_size_x)) + 1; // exclusive
bin_index_xl = DREAMPLACE_STD_NAMESPACE::max(bin_index_xl, 0);
bin_index_xh = DREAMPLACE_STD_NAMESPACE::min(bin_index_xh, num_bins_x);
// int bin_index_xh = bin_index_xl+num_impacted_bins_x;
// Yibo: looks very weird implementation, but this is how RePlAce implements
// it the common practice should be floor Zixuan and Jiaqi: use the common
// practice of floor
int bin_index_yl = int((node_y - yl) * inv_bin_size_y);
int bin_index_yh =
int(((node_y + node_size_y - yl) * inv_bin_size_y)) + 1; // exclusive
bin_index_yl = DREAMPLACE_STD_NAMESPACE::max(bin_index_yl, 0);
bin_index_yh = DREAMPLACE_STD_NAMESPACE::min(bin_index_yh, num_bins_y);
// int bin_index_yh = bin_index_yl+num_impacted_bins_y;
T &gx = grad_x_tensor[i];
T &gy = grad_y_tensor[i];
gx = 0;
gy = 0;
// update density potential map
for (int k = bin_index_xl; k < bin_index_xh; ++k) {
T px = triangle_density_function(node_x, node_size_x, xl, k, bin_size_x);
for (int h = bin_index_yl; h < bin_index_yh; ++h) {
T py =
triangle_density_function(node_y, node_size_y, yl, h, bin_size_y);
T area = px * py;
int idx = k * num_bins_y + h;
gx += area * field_map_x_tensor[idx];
gy += area * field_map_y_tensor[idx];
}
}
gx *= ratio;
gy *= ratio;
}
}
template <typename T>
int computeElectricForceCudaLauncher(
int num_bins_x, int num_bins_y, int num_impacted_bins_x,
int num_impacted_bins_y, const T *field_map_x_tensor,
const T *field_map_y_tensor, const T *x_tensor, const T *y_tensor,
const T *node_size_x_clamped_tensor, const T *node_size_y_clamped_tensor,
const T *offset_x_tensor, const T *offset_y_tensor, const T *ratio_tensor,
const T *bin_center_x_tensor, const T *bin_center_y_tensor, T xl, T yl,
T xh, T yh, T bin_size_x, T bin_size_y, int num_nodes, T *grad_x_tensor,
T *grad_y_tensor, const int *sorted_node_map) {
int thread_count = 64;
dim3 blockSize(2, 2, thread_count);
size_t shared_mem_size = sizeof(T) * thread_count * 2;
int block_count_nodes = (num_nodes + thread_count - 1) / thread_count;
computeElectricForce<<<block_count_nodes, blockSize, shared_mem_size>>>(
num_bins_x, num_bins_y, field_map_x_tensor, field_map_y_tensor, x_tensor,
y_tensor, node_size_x_clamped_tensor, node_size_y_clamped_tensor,
offset_x_tensor, offset_y_tensor, ratio_tensor, bin_center_x_tensor,
bin_center_y_tensor, xl, yl, xh, yh, bin_size_x / 2, bin_size_y / 2,
bin_size_x, bin_size_y, 1 / bin_size_x, 1 / bin_size_y, num_nodes,
grad_x_tensor, grad_y_tensor, sorted_node_map);
// computeElectricForceSimpleLikeCPU<<<block_count_nodes, thread_count>>>(
// num_bins_x, num_bins_y,
// num_impacted_bins_x, num_impacted_bins_y,
// field_map_x_tensor, field_map_y_tensor,
// x_tensor, y_tensor,
// node_size_x_clamped_tensor, node_size_y_clamped_tensor,
// offset_x_tensor, offset_y_tensor,
// ratio_tensor,
// bin_center_x_tensor, bin_center_y_tensor,
// xl, yl, xh, yh,
// bin_size_x, bin_size_y,
// num_nodes,
// grad_x_tensor, grad_y_tensor
// );
return 0;
}
#define REGISTER_KERNEL_LAUNCHER(T) \
template int computeElectricForceCudaLauncher<T>( \
int num_bins_x, int num_bins_y, int num_impacted_bins_x, \
int num_impacted_bins_y, const T *field_map_x_tensor, \
const T *field_map_y_tensor, const T *x_tensor, const T *y_tensor, \
const T *node_size_x_clamped_tensor, \
const T *node_size_y_clamped_tensor, const T *offset_x_tensor, \
const T *offset_y_tensor, const T *ratio_tensor, \
const T *bin_center_x_tensor, const T *bin_center_y_tensor, T xl, T yl, \
T xh, T yh, T bin_size_x, T bin_size_y, int num_nodes, T *grad_x_tensor, \
T *grad_y_tensor, const int *sorted_node_map);
REGISTER_KERNEL_LAUNCHER(float);
REGISTER_KERNEL_LAUNCHER(double);
DREAMPLACE_END_NAMESPACE
|
2e9376a1415a1e29358824529bf777a5039cb4ae.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
template <int TILE>
class MulMat
{
public:
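// MulMatrixKernel2 is the naive global-memory multiply (one thread per C element);
// MulMatrixKernel3 is the tiled version that stages TILE x TILE blocks of A and B in shared memory.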
__global__ void MulMatrixKernel2(float *A, float *B, float *C, int N)
{
float res=0;
int x=blockIdx.x * TILE + threadIdx.x; // changed: output column
int y=blockIdx.y * TILE + threadIdx.y; // changed: output row
for (int k=0; k<N; ++k) {
float Ai=A[y * N + k];
float Bi=B[k * N + x];
res += Ai*Bi;
}
C[y * N + x]=res;
}
__global__ void MulMatrixKernel3(float* A, float* B, float* C, int N) // kernel
{
__shared__ float As[TILE][TILE];
__shared__ float Bs[TILE][TILE];
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
// row and column of the C matrix element to compute
int ypos = by * TILE + ty;
int xpos = bx * TILE + tx;
float res = 0; // accumulated result
// loop over the matrix tiles for this block
for (int m = 0; m < N/TILE; ++m) {
// threads cooperatively load one element each of the A and B tiles into shared memory
As[ty][tx] = A[ypos*N + (m*TILE + tx)];
Bs[ty][tx] = B[(m*TILE + ty)*N + xpos];
__syncthreads();
for (int k = 0; k < TILE; ++k) // the actual computation
res += As[ty][k] * Bs[k][tx];
__syncthreads();
}
C[ypos*N + xpos] = res; // write the result back to global memory
}
private:
} ;
int main() {
float * A;
float * B;
float * C;
MulMat< 8 >.MulMatrixKernel3( A, B, C, 8 );
}
| 2e9376a1415a1e29358824529bf777a5039cb4ae.cu |
template <int TILE>
class MulMat
{
public:
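// MulMatrixKernel2 is the naive global-memory multiply (one thread per C element);
// MulMatrixKernel3 is the tiled version that stages TILE x TILE blocks of A and B in shared memory.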
__global__ void MulMatrixKernel2(float *A, float *B, float *C, int N)
{
float res=0;
int x=blockIdx.x * TILE + threadIdx.x; // changed: output column
int y=blockIdx.y * TILE + threadIdx.y; // changed: output row
for (int k=0; k<N; ++k) {
float Ai=A[y * N + k];
float Bi=B[k * N + x];
res += Ai*Bi;
}
C[y * N + x]=res;
}
__global__ void MulMatrixKernel3(float* A, float* B, float* C, int N) // kernel
{
__shared__ float As[TILE][TILE];
__shared__ float Bs[TILE][TILE];
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
// row and column of the C matrix element to compute
int ypos = by * TILE + ty;
int xpos = bx * TILE + tx;
float res = 0; // accumulated result
// loop over the matrix tiles for this block
for (int m = 0; m < N/TILE; ++m) {
// threads cooperatively load one element each of the A and B tiles into shared memory
As[ty][tx] = A[ypos*N + (m*TILE + tx)];
Bs[ty][tx] = B[(m*TILE + ty)*N + xpos];
__syncthreads();
for (int k = 0; k < TILE; ++k) // the actual computation
res += As[ty][k] * Bs[k][tx];
__syncthreads();
}
C[ypos*N + xpos] = res; // write the result back to global memory
}
private:
} ;
int main() {
float * A;
float * B;
float * C;
MulMat< 8 >.MulMatrixKernel3( A, B, C, 8 );
}
|
a49f2b6c6e46ab26121517f3694c12ab33cc9129.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math_functions.h>
#include <thrust/device_vector.h>
#include <thrust/functional.h> // thrust::plus
#include <thrust/reduce.h>
#include "caffe/common.hpp"
#include "caffe/cukcf/cukcf.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
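// Element-wise complex helpers over length-N arrays:
// mul_C: dst[i] = a[i] * b[i]; mul_cjC: dst[i] = conj(a[i]) * b[i];
// add_scalar_C: dst[i] = a[i] + alpha; div_C: dst[i] = a[i] / b[i].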
__global__ void mul_C_kernel(const int n, const hipComplex* a,
const hipComplex* b, hipComplex* dst) {
CUDA_KERNEL_LOOP(index, n) {
dst[index] = cuCmulf(a[index], b[index]);
}
}
void caffe_gpu_mul_C(const int N, const hipComplex* a, const hipComplex* b,
hipComplex* dst) {
hipLaunchKernelGGL(( mul_C_kernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, dst);
}
__global__ void mul_cjC_kernel(const int n, const hipComplex* a,
const hipComplex* b, hipComplex* dst) {
CUDA_KERNEL_LOOP(index, n) {
dst[index] = cuCmulf(cuConjf(a[index]), b[index]);
}
}
void caffe_gpu_mul_cjC(const int N, const hipComplex* a, const hipComplex* b,
hipComplex* dst) {
hipLaunchKernelGGL(( mul_cjC_kernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, dst);
}
__global__ void add_scalar_C_kernel(const int n, const hipComplex* a,
const hipComplex alpha, hipComplex* dst) {
CUDA_KERNEL_LOOP(index, n) {
dst[index] = cuCaddf(a[index], alpha);
}
}
void caffe_gpu_add_scalar_C(const int N, const hipComplex* a, const hipComplex alpha,
hipComplex* dst) {
hipLaunchKernelGGL(( add_scalar_C_kernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, alpha, dst);
}
__global__ void div_C_kernel(const int n, const hipComplex* a,
const hipComplex* b, hipComplex* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = cuCdivf(a[index], b[index]);
}
}
void caffe_gpu_div_C(const int N, const hipComplex* a,
const hipComplex* b, hipComplex* dst) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( div_C_kernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, dst);
}
}
| a49f2b6c6e46ab26121517f3694c12ab33cc9129.cu | #include <math_functions.h>
#include <thrust/device_vector.h>
#include <thrust/functional.h> // thrust::plus
#include <thrust/reduce.h>
#include "caffe/common.hpp"
#include "caffe/cukcf/cukcf.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
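// Element-wise complex helpers over length-N arrays:
// mul_C: dst[i] = a[i] * b[i]; mul_cjC: dst[i] = conj(a[i]) * b[i];
// add_scalar_C: dst[i] = a[i] + alpha; div_C: dst[i] = a[i] / b[i].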
__global__ void mul_C_kernel(const int n, const cuComplex* a,
const cuComplex* b, cuComplex* dst) {
CUDA_KERNEL_LOOP(index, n) {
dst[index] = cuCmulf(a[index], b[index]);
}
}
void caffe_gpu_mul_C(const int N, const cuComplex* a, const cuComplex* b,
cuComplex* dst) {
mul_C_kernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, dst);
}
__global__ void mul_cjC_kernel(const int n, const cuComplex* a,
const cuComplex* b, cuComplex* dst) {
CUDA_KERNEL_LOOP(index, n) {
dst[index] = cuCmulf(cuConjf(a[index]), b[index]);
}
}
void caffe_gpu_mul_cjC(const int N, const cuComplex* a, const cuComplex* b,
cuComplex* dst) {
mul_cjC_kernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, dst);
}
__global__ void add_scalar_C_kernel(const int n, const cuComplex* a,
const cuComplex alpha, cuComplex* dst) {
CUDA_KERNEL_LOOP(index, n) {
dst[index] = cuCaddf(a[index], alpha);
}
}
void caffe_gpu_add_scalar_C(const int N, const cuComplex* a, const cuComplex alpha,
cuComplex* dst) {
add_scalar_C_kernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, alpha, dst);
}
__global__ void div_C_kernel(const int n, const cuComplex* a,
const cuComplex* b, cuComplex* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = cuCdivf(a[index], b[index]);
}
}
void caffe_gpu_div_C(const int N, const cuComplex* a,
const cuComplex* b, cuComplex* dst) {
// NOLINT_NEXT_LINE(whitespace/operators)
div_C_kernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, dst);
}
}
|
2e19fe80895c92edab1538ea98107ddc8dfc3986.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// turing.cpp : Defines the entry point for the console application.
//
#include <iostream>
#include <vector>
#include <sstream>
#include <hash_map>
#include <algorithm>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
class MyClass_Hasher {
public:
static const size_t bucket_size = 10; // mean bucket size that the container should try not to exceed
static const size_t min_buckets = (1 << 10); // minimum number of buckets, power
MyClass_Hasher() {
// should be default-constructible
}
size_t operator()(const std::vector<int>& v) const {
size_t hash = 5381;
std::vector<int>::const_iterator iter = v.begin();
std::vector<int>::const_iterator end = v.end();
for(; iter != end; ++iter) {
hash = ((hash << 5) + hash) + *iter;
}
return hash;
}
bool operator()(const std::vector<int>& v1, const std::vector<int>& v2) const {
if (v1.size() != v2.size()) {
return true;
}
std::vector<int>::const_iterator iter1 = v1.begin();
std::vector<int>::const_iterator iter2 = v2.begin();
std::vector<int>::const_iterator end1 = v1.end();
for (; iter1 != end1; ++iter1, ++iter2) {
if (*iter1 != *iter2) {
return true;
}
}
return false;
}
};
class Machine
{
public:
Machine(long long machineNumber)
:transitionsTable_(),
currentStateIndex_(0),
tape_(),
currentIndexInTape_(0),
iterationNumber_(0),
machineNumber_(machineNumber),
maxIndexWriteInTape_(0)
{
long long nb = machineNumber;
transitionsTable_.resize(8);
{
// inverse order!!!!!
std::vector<std::vector<int> >::iterator iter = transitionsTable_.begin();
std::vector<std::vector<int> >::iterator end = transitionsTable_.end();
for (; iter != end; ++iter) {
int rest = nb % 16;
iter->resize(3);
// inverse order !!!
std::vector<int>::iterator subIter = iter->begin();
std::vector<int>::iterator subEnd = iter->end();
int subNb = rest;
for (; subIter != subEnd-1; ++subIter) {
int subRest = subNb % 2;
*subIter = subRest;
subNb = (subNb - subRest)/2;
}
*subIter = subNb;
nb = (nb - rest)/16;
}
}
tape_.resize(300, 0);
}
void process(std::hash_map <std::vector<int>, std::pair<long long, int>, MyClass_Hasher >& map)
{
for (int iterationCounter = 0; iterationCounter < 300; ++iterationCounter) {
int charRead = tape_[currentIndexInTape_];
std::vector<int>* pointerOnInstructionToFollow = NULL;
if (currentStateIndex_ == 0) {
if (charRead == 0) {
pointerOnInstructionToFollow = &transitionsTable_[0];
}
else {
pointerOnInstructionToFollow = &transitionsTable_[1];
}
}
else if (currentStateIndex_ == 1) {
if (charRead == 0) {
pointerOnInstructionToFollow = &transitionsTable_[2];
}
else {
pointerOnInstructionToFollow = &transitionsTable_[3];
}
}
else if (currentStateIndex_ == 2) {
if (charRead == 0) {
pointerOnInstructionToFollow = &transitionsTable_[4];
}
else {
pointerOnInstructionToFollow = &transitionsTable_[5];
}
}
else {
if (charRead == 0) {
pointerOnInstructionToFollow = &transitionsTable_[6];
}
else {
pointerOnInstructionToFollow = &transitionsTable_[7];
}
}
// write on the tape
tape_[currentIndexInTape_] = (*pointerOnInstructionToFollow)[1];
// move that in the if... see if it give the same.
// Update the index in tape
if ((*pointerOnInstructionToFollow)[0] == 0) {
currentIndexInTape_++;
if (currentIndexInTape_ > maxIndexWriteInTape_) {
maxIndexWriteInTape_ = currentIndexInTape_;
}
}
else {
currentIndexInTape_--;
if (currentIndexInTape_ < 0) {
std::vector<int> infosToStore;
infosToStore.reserve(maxIndexWriteInTape_);
if (maxIndexWriteInTape_ >= 0) {
for (int i = maxIndexWriteInTape_; i > 0; --i) {
infosToStore.push_back(tape_[i]);
}
infosToStore.push_back(tape_[0]);
}
std::hash_map <std::vector<int>, std::pair<long long, int>, MyClass_Hasher >::const_iterator mapIter = map.find(infosToStore);
if (mapIter == map.end()) {
map[infosToStore] = std::make_pair(machineNumber_, (iterationCounter+1));
}
else {
if (machineNumber_ < mapIter->second.first) {
map[infosToStore] = std::make_pair(machineNumber_, (iterationCounter+1));
}
}
return;
}
}
// Update state to go to.
currentStateIndex_ = (*pointerOnInstructionToFollow)[2];
}
}
long long machineNumber_;
int currentStateIndex_;
int currentIndexInTape_;
int iterationNumber_;
std::vector<int> tape_;
int maxIndexWriteInTape_;
std::vector<std::vector<int> > transitionsTable_;
};
// The kernel that runs on the graphics card in parallel
__global__ void
run_turing_machine(const long long machineNumBegin, bool* machineEnded, int* nbIteration, int* printedTape, int* sizeTape, int tapeLength, int numElements)
{
int index = blockDim.x * blockIdx.x + threadIdx.x;
if (index >= numElements) {
return;
}
// Compute the number of the current machine.
long long machineNumber = machineNumBegin + index;
// Reject odd-numbered machines.
if (machineNumber%2 == 1) {
sizeTape[index] = -1; // to force how this machine is handled in the later processing.
machineEnded[index] = true;
return;
}
// currentTapepointer points to the start of the tape allocated for the current machine
int* currentTapepointer = printedTape + index*tapeLength;
for (int* tmp = currentTapepointer; tmp != currentTapepointer+tapeLength; ++tmp) {
// Initialize the whole tape to 0.
*tmp = 0;
}
int currentStateIndex_ = 0;
int currentIndexInTape_ = 0;
int iterationNumber_ = 0;
int maxIndexWriteInTape_ = 0;
machineEnded[index] = false;
sizeTape[index] = 0;
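// Decode the machine number: each 4-bit chunk describes one transition
// (bit 0 = head move, 0 = right / 1 = left; bit 1 = symbol to write; bits 2-3 = next state),
// and the 8 chunks cover the 4 states x 2 possible symbols read.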
int transitionsTable[3*8];
{
int rest;
int subNb;
for (int i = 0; i < 8; ++i) {
rest = machineNumber % 16;
// inverse order !!!
subNb = rest;
int j = 0;
for (; j < 2; ) {
int subRest = subNb % 2;
transitionsTable[3*i+j] = subRest;
subNb = (subNb - subRest)/2;
j++;
}
transitionsTable[3*i+j] = subNb;
machineNumber = (machineNumber - rest)/16;
}
}
for (int iterationCounter = 0; iterationCounter < 300; ++iterationCounter) {
int charRead = currentTapepointer[currentIndexInTape_];
int* pointerOnInstructionToFollow = NULL;
if (currentStateIndex_ == 0) {
if (charRead == 0) {
pointerOnInstructionToFollow = &transitionsTable[3*0];
}
else {
pointerOnInstructionToFollow = &transitionsTable[3*1];
}
}
else if (currentStateIndex_ == 1) {
if (charRead == 0) {
pointerOnInstructionToFollow = &transitionsTable[3*2];
}
else {
pointerOnInstructionToFollow = &transitionsTable[3*3];
}
}
else if (currentStateIndex_ == 2) {
if (charRead == 0) {
pointerOnInstructionToFollow = &transitionsTable[3*4];
}
else {
pointerOnInstructionToFollow = &transitionsTable[3*5];
}
}
else {
if (charRead == 0) {
pointerOnInstructionToFollow = &transitionsTable[3*6];
}
else {
pointerOnInstructionToFollow = &transitionsTable[3*7];
}
}
// write on the tape
currentTapepointer[currentIndexInTape_] = pointerOnInstructionToFollow[1];
// Update the index in tape
if (pointerOnInstructionToFollow[0] == 0) {
currentIndexInTape_++;
if (currentIndexInTape_ > maxIndexWriteInTape_) {
maxIndexWriteInTape_ = currentIndexInTape_;
}
if (currentIndexInTape_ >= tapeLength) {
return;
}
}
else {
currentIndexInTape_--;
if (currentIndexInTape_ < 0) {
machineEnded[index] = true;
nbIteration[index] = iterationCounter+1;
sizeTape[index] = maxIndexWriteInTape_;
return;
}
}
// Update state to go to.
currentStateIndex_ = pointerOnInstructionToFollow[2];
}
}
/*bool sym4 (long long int nb)
{
long long int quo, nx, min;
short i,j,nb16[6][8]={};
quo=min=nb;
for (i=7; quo>0; i--) {nb16[0][i] = quo%16; quo=quo/16;}
nb16[1][0]=nb16[2][2]=nb16[3][4]=nb16[4][2]=nb16[5][4]=nb16[0][0];
nb16[1][1]=nb16[2][3]=nb16[3][5]=nb16[4][3]=nb16[5][5]=nb16[0][1];
nb16[1][4]=nb16[2][0]=nb16[3][0]=nb16[4][4]=nb16[5][2]=nb16[0][2];
nb16[1][5]=nb16[2][1]=nb16[3][1]=nb16[4][5]=nb16[5][3]=nb16[0][3];
nb16[1][2]=nb16[2][4]=nb16[3][2]=nb16[4][0]=nb16[5][0]=nb16[0][4];
nb16[1][3]=nb16[2][5]=nb16[3][3]=nb16[4][1]=nb16[5][1]=nb16[0][5];
nb16[1][6]=nb16[2][6]=nb16[3][6]=nb16[4][6]=nb16[5][6]=nb16[0][6];
nb16[1][7]=nb16[2][7]=nb16[3][7]=nb16[4][7]=nb16[5][7]=nb16[0][7];
for (i=0; i<8; i++)
{
switch (nb16[1][i]/4)
{case 0 : break;
case 1 : nb16[1][i]=nb16[1][i]+4; break;
case 2 : nb16[1][i]=nb16[1][i]-4; break;
case 3 : break;}
switch (nb16[2][i]/4)
{case 0 : break;
case 1 : break;
case 2 : nb16[2][i]=nb16[2][i]+4; break;
case 3 : nb16[2][i]=nb16[2][i]-4; break;}
switch (nb16[3][i]/4)
{case 0 : break;
case 1 : nb16[3][i]=nb16[3][i]+4; break;
case 2 : nb16[3][i]=nb16[3][i]+4; break;
case 3 : nb16[3][i]=nb16[3][i]-8; break;}
switch (nb16[4][i]/4)
{case 0 : break;
case 1 : nb16[4][i]=nb16[4][i]+8; break;
case 2 : nb16[4][i]=nb16[4][i]-4; break;
case 3 : nb16[4][i]=nb16[4][i]-4; break;}
switch (nb16[5][i]/4)
{case 0 : break;
case 1 : nb16[5][i]=nb16[5][i]+8; break;
case 2 : break;
case 3 : nb16[5][i]=nb16[5][i]-8; break;}
}
for (j=1; j<6; j++) {nx=0;for (i=0; i<8; i++) {nx=16*nx + nb16[j][i];} if (nx<min) min=nx; }
if (nb == min) return (false); else return (true);
}*/
int main(int argc, char* argv[])
{
std::hash_map <std::vector<int>, std::pair<long long, int>, MyClass_Hasher > map;
// This number is the number of machines that run in parallel before each resynchronization on the CPU.
// In principle the larger it is the faster it goes, but take care not to exceed a certain limit, otherwise the
// program runs out of memory and crashes.
// A safety margin is preferable, all the more so if the PC is running other things at the same time.
const int maxSimultaneously = 1024*100;
// length of the print tape for each Turing machine.
const int tapeLength = 20;
// Allocate the structures on the CPU.
int* nbIteration = (int*)malloc(maxSimultaneously*sizeof(int));
bool* machineEnded = (bool*)malloc(maxSimultaneously*sizeof(bool));
int* tapeSize = (int*) malloc(maxSimultaneously*sizeof(int));
int* printedTape = (int*)malloc(tapeLength*maxSimultaneously*sizeof(int));
int* d_nbIteration = NULL;
bool* d_machineEnded = NULL;
int* d_tapeSize = NULL;
int* d_printedTape = NULL;
// Allocate the structures on the GPU.
hipMalloc((void **)&d_nbIteration, maxSimultaneously*sizeof(int));
hipMalloc((void **)&d_machineEnded, maxSimultaneously*sizeof(bool));
hipMalloc((void **)&d_tapeSize, maxSimultaneously*sizeof(int));
hipMalloc((void **)&d_printedTape, tapeLength*maxSimultaneously*sizeof(int));
long long numberMachinesStopped = 0;
// Note: for small tests, the number of machines to compute can be reduced....
long long limit = 4294967296;
for (long long currentMachineNum = 0; currentMachineNum < limit; ) {/*if (sym4(currentMachineNum)) continue;*/
int maxNbelements = maxSimultaneously;
if (currentMachineNum+maxSimultaneously > limit) {
long long tmp = limit - currentMachineNum;
maxNbelements = (int) tmp;
}
// threadsPerBlock and blocksPerGrid are 2 parameters required by CUDA: they can be changed depending
// on the graphics card.
int threadsPerBlock = 1024;
int blocksPerGrid =maxSimultaneously/threadsPerBlock;//(maxSimultaneously + threadsPerBlock - 1) / threadsPerBlock;
hipLaunchKernelGGL(( run_turing_machine), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, currentMachineNum, d_machineEnded, d_nbIteration, d_printedTape, d_tapeSize, tapeLength, maxNbelements);
hipError_t err = hipGetLastError();
if (err != hipSuccess)
{
// CUDA error; maybe the parameter values are too high for the graphics card in use...
fprintf(stderr, "Failed to launch run_turing_machine kernel (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the data back from the graphics card into the host structures so they can be processed.
hipMemcpy(nbIteration, d_nbIteration, maxSimultaneously*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(machineEnded, d_machineEnded, maxSimultaneously*sizeof(bool), hipMemcpyDeviceToHost);
hipMemcpy(tapeSize, d_tapeSize, maxSimultaneously*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(printedTape, d_printedTape, tapeLength*maxSimultaneously*sizeof(int*), hipMemcpyDeviceToHost);
// Loop over all elements.
for (int k = 0; k < maxNbelements; k++) {
// Check whether the data for machine k is relevant (did the machine halt and write something?)
if (machineEnded[k] == true && tapeSize[k] >= 0) {
std::vector<int> infosToStore;
infosToStore.reserve(tapeSize[k]+1);
// Put the printed tape into a vector so it can be looked up in the hash table.
int* pointerBegin = &printedTape[k*tapeLength];
int* pointerEnd = &printedTape[k*tapeLength]+tapeSize[k]+1;
for (;pointerBegin != pointerEnd; ++pointerBegin) {
infosToStore.push_back(*pointerBegin);
}
long long curMachine = currentMachineNum+k;
std::hash_map <std::vector<int>, std::pair<long long, int>, MyClass_Hasher >::const_iterator mapIter = map.find(infosToStore);
if (mapIter == map.end()) {
// Result not found, so insert it.
map[infosToStore] = std::make_pair(curMachine, nbIteration[k]);
}
else {
// Result already present; insert only if the machine number is smaller than the one already stored.
if (curMachine < mapIter->second.first) {
map[infosToStore] = std::make_pair(curMachine, nbIteration[k]);
}
}
numberMachinesStopped++;
}
}
currentMachineNum += maxSimultaneously;
}
// Free the GPU memory
hipFree(d_nbIteration);
hipFree(d_machineEnded);
hipFree(d_tapeSize);
hipFree(d_printedTape);
// Free the CPU memory
free(nbIteration);
free(machineEnded);
free(tapeSize);
free(printedTape);
// See the explanation in the 3statesCpu project...
FILE* f = fopen("MT4_Cuda1.txt", "w");
fprintf(f, "Nombre de machines executees: %lld\n", limit);
fprintf(f, "Nombre de machines qui se sont arretee: %lld\n", numberMachinesStopped);
std::vector<std::pair<long long, std::pair<int, std::vector<int> > > > allMachines;
{
std::hash_map <std::vector<int>, std::pair<long long, int>, MyClass_Hasher >::const_iterator iter = map.begin();
std::hash_map <std::vector<int>, std::pair<long long, int>, MyClass_Hasher >::const_iterator end = map.end();
for (; iter != end; ++iter) {
allMachines.push_back(std::make_pair(iter->second.first, std::make_pair(iter->second.second, iter->first)));
}
}
std::sort(allMachines.begin(), allMachines.end());
{
std::vector<std::pair<long long, std::pair<int, std::vector<int> > > >::const_iterator iter = allMachines.begin();
std::vector<std::pair<long long, std::pair<int, std::vector<int> > > >::const_iterator end = allMachines.end();
for (; iter != end; ++iter) {
const std::vector<int>& vect = (iter->second.second);
fprintf(f, "{%lld, %d, {", iter->first, iter->second.first);
if (vect.size() > 0) {
std::vector<int>::const_reverse_iterator tapeIter = vect.rbegin();
std::vector<int>::const_reverse_iterator tapeEnd = vect.rend();
for (; tapeIter != tapeEnd-1; ++tapeIter) {
fprintf(f, "%d, ", *tapeIter);
}
fprintf(f, "%d", *tapeIter);
}
fprintf(f, "}}\n");
}
}
fclose(f);
std::cout << "process terminated" << std::endl;
std::cin.get(); // wait for a key press before exiting
}
| 2e19fe80895c92edab1538ea98107ddc8dfc3986.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// turing.cpp : Defines the entry point for the console application.
//
#include <iostream>
#include <vector>
#include <sstream>
#include <hash_map>
#include <algorithm>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
class MyClass_Hasher {
public:
static const size_t bucket_size = 10; // mean bucket size that the container should try not to exceed
static const size_t min_buckets = (1 << 10); // minimum number of buckets, power
MyClass_Hasher() {
// should be default-constructible
}
size_t operator()(const std::vector<int>& v) const {
size_t hash = 5381;
std::vector<int>::const_iterator iter = v.begin();
std::vector<int>::const_iterator end = v.end();
for(; iter != end; ++iter) {
hash = ((hash << 5) + hash) + *iter;
}
return hash;
}
bool operator()(const std::vector<int>& v1, const std::vector<int>& v2) const {
if (v1.size() != v2.size()) {
return true;
}
std::vector<int>::const_iterator iter1 = v1.begin();
std::vector<int>::const_iterator iter2 = v2.begin();
std::vector<int>::const_iterator end1 = v1.end();
for (; iter1 != end1; ++iter1, ++iter2) {
if (*iter1 != *iter2) {
return true;
}
}
return false;
}
};
class Machine
{
public:
Machine(long long machineNumber)
:transitionsTable_(),
currentStateIndex_(0),
tape_(),
currentIndexInTape_(0),
iterationNumber_(0),
machineNumber_(machineNumber),
maxIndexWriteInTape_(0)
{
long long nb = machineNumber;
transitionsTable_.resize(8);
{
// inverse order!!!!!
std::vector<std::vector<int> >::iterator iter = transitionsTable_.begin();
std::vector<std::vector<int> >::iterator end = transitionsTable_.end();
for (; iter != end; ++iter) {
int rest = nb % 16;
iter->resize(3);
// inverse order !!!
std::vector<int>::iterator subIter = iter->begin();
std::vector<int>::iterator subEnd = iter->end();
int subNb = rest;
for (; subIter != subEnd-1; ++subIter) {
int subRest = subNb % 2;
*subIter = subRest;
subNb = (subNb - subRest)/2;
}
*subIter = subNb;
nb = (nb - rest)/16;
}
}
tape_.resize(300, 0);
}
void process(std::hash_map <std::vector<int>, std::pair<long long, int>, MyClass_Hasher >& map)
{
for (int iterationCounter = 0; iterationCounter < 300; ++iterationCounter) {
int charRead = tape_[currentIndexInTape_];
std::vector<int>* pointerOnInstructionToFollow = NULL;
if (currentStateIndex_ == 0) {
if (charRead == 0) {
pointerOnInstructionToFollow = &transitionsTable_[0];
}
else {
pointerOnInstructionToFollow = &transitionsTable_[1];
}
}
else if (currentStateIndex_ == 1) {
if (charRead == 0) {
pointerOnInstructionToFollow = &transitionsTable_[2];
}
else {
pointerOnInstructionToFollow = &transitionsTable_[3];
}
}
else if (currentStateIndex_ == 2) {
if (charRead == 0) {
pointerOnInstructionToFollow = &transitionsTable_[4];
}
else {
pointerOnInstructionToFollow = &transitionsTable_[5];
}
}
else {
if (charRead == 0) {
pointerOnInstructionToFollow = &transitionsTable_[6];
}
else {
pointerOnInstructionToFollow = &transitionsTable_[7];
}
}
// write on the tape
tape_[currentIndexInTape_] = (*pointerOnInstructionToFollow)[1];
// move that into the if... see if it gives the same.
// Update the index in tape
if ((*pointerOnInstructionToFollow)[0] == 0) {
currentIndexInTape_++;
if (currentIndexInTape_ > maxIndexWriteInTape_) {
maxIndexWriteInTape_ = currentIndexInTape_;
}
}
else {
currentIndexInTape_--;
if (currentIndexInTape_ < 0) {
std::vector<int> infosToStore;
infosToStore.reserve(maxIndexWriteInTape_);
if (maxIndexWriteInTape_ >= 0) {
for (int i = maxIndexWriteInTape_; i > 0; --i) {
infosToStore.push_back(tape_[i]);
}
infosToStore.push_back(tape_[0]);
}
std::hash_map <std::vector<int>, std::pair<long long, int>, MyClass_Hasher >::const_iterator mapIter = map.find(infosToStore);
if (mapIter == map.end()) {
map[infosToStore] = std::make_pair(machineNumber_, (iterationCounter+1));
}
else {
if (machineNumber_ < mapIter->second.first) {
map[infosToStore] = std::make_pair(machineNumber_, (iterationCounter+1));
}
}
return;
}
}
// Update state to go to.
currentStateIndex_ = (*pointerOnInstructionToFollow)[2];
}
}
long long machineNumber_;
int currentStateIndex_;
int currentIndexInTape_;
int iterationNumber_;
std::vector<int> tape_;
int maxIndexWriteInTape_;
std::vector<std::vector<int> > transitionsTable_;
};
// The kernel that runs in parallel on the graphics card
__global__ void
run_turing_machine(const long long machineNumBegin, bool* machineEnded, int* nbIteration, int* printedTape, int* sizeTape, int tapeLength, int numElements)
{
int index = blockDim.x * blockIdx.x + threadIdx.x;
if (index >= numElements) {
return;
}
// Compute the current machine's number.
long long machineNumber = machineNumBegin + index;
// Reject odd-numbered machines.
if (machineNumber%2 == 1) {
sizeTape[index] = -1; // so that the later host-side pass filters this machine out.
machineEnded[index] = true;
return;
}
// currentTapepointer points to the beginning of the tape allocated for the current machine
int* currentTapepointer = printedTape + index*tapeLength;
for (int* tmp = currentTapepointer; tmp != currentTapepointer+tapeLength; ++tmp) {
// Initialise the whole tape to 0.
*tmp = 0;
}
int currentStateIndex_ = 0;
int currentIndexInTape_ = 0;
int iterationNumber_ = 0;
int maxIndexWriteInTape_ = 0;
machineEnded[index] = false;
sizeTape[index] = 0;
int transitionsTable[3*8];
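// Decode the machine number: each of the 8 transitions is one base-16 digit, split into a move-direction bit,
// a written-symbol bit and a next-state value (0-3).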
{
int rest;
int subNb;
for (int i = 0; i < 8; ++i) {
rest = machineNumber % 16;
// inverse order !!!
subNb = rest;
int j = 0;
for (; j < 2; ) {
int subRest = subNb % 2;
transitionsTable[3*i+j] = subRest;
subNb = (subNb - subRest)/2;
j++;
}
transitionsTable[3*i+j] = subNb;
machineNumber = (machineNumber - rest)/16;
}
}
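// Simulate at most 300 steps: a machine counts as halted when its head moves left past the start of the tape;
// runs that walk off the right end of the tape or exhaust the step budget are left with machineEnded == false.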
for (int iterationCounter = 0; iterationCounter < 300; ++iterationCounter) {
int charRead = currentTapepointer[currentIndexInTape_];
int* pointerOnInstructionToFollow = NULL;
if (currentStateIndex_ == 0) {
if (charRead == 0) {
pointerOnInstructionToFollow = &transitionsTable[3*0];
}
else {
pointerOnInstructionToFollow = &transitionsTable[3*1];
}
}
else if (currentStateIndex_ == 1) {
if (charRead == 0) {
pointerOnInstructionToFollow = &transitionsTable[3*2];
}
else {
pointerOnInstructionToFollow = &transitionsTable[3*3];
}
}
else if (currentStateIndex_ == 2) {
if (charRead == 0) {
pointerOnInstructionToFollow = &transitionsTable[3*4];
}
else {
pointerOnInstructionToFollow = &transitionsTable[3*5];
}
}
else {
if (charRead == 0) {
pointerOnInstructionToFollow = &transitionsTable[3*6];
}
else {
pointerOnInstructionToFollow = &transitionsTable[3*7];
}
}
// write on the tape
currentTapepointer[currentIndexInTape_] = pointerOnInstructionToFollow[1];
// Update the index in tape
if (pointerOnInstructionToFollow[0] == 0) {
currentIndexInTape_++;
if (currentIndexInTape_ > maxIndexWriteInTape_) {
maxIndexWriteInTape_ = currentIndexInTape_;
}
if (currentIndexInTape_ >= tapeLength) {
return;
}
}
else {
currentIndexInTape_--;
if (currentIndexInTape_ < 0) {
machineEnded[index] = true;
nbIteration[index] = iterationCounter+1;
sizeTape[index] = maxIndexWriteInTape_;
return;
}
}
// Update state to go to.
currentStateIndex_ = pointerOnInstructionToFollow[2];
}
}
/*bool sym4 (long long int nb)
{
long long int quo, nx, min;
short i,j,nb16[6][8]={};
quo=min=nb;
for (i=7; quo>0; i--) {nb16[0][i] = quo%16; quo=quo/16;}
nb16[1][0]=nb16[2][2]=nb16[3][4]=nb16[4][2]=nb16[5][4]=nb16[0][0];
nb16[1][1]=nb16[2][3]=nb16[3][5]=nb16[4][3]=nb16[5][5]=nb16[0][1];
nb16[1][4]=nb16[2][0]=nb16[3][0]=nb16[4][4]=nb16[5][2]=nb16[0][2];
nb16[1][5]=nb16[2][1]=nb16[3][1]=nb16[4][5]=nb16[5][3]=nb16[0][3];
nb16[1][2]=nb16[2][4]=nb16[3][2]=nb16[4][0]=nb16[5][0]=nb16[0][4];
nb16[1][3]=nb16[2][5]=nb16[3][3]=nb16[4][1]=nb16[5][1]=nb16[0][5];
nb16[1][6]=nb16[2][6]=nb16[3][6]=nb16[4][6]=nb16[5][6]=nb16[0][6];
nb16[1][7]=nb16[2][7]=nb16[3][7]=nb16[4][7]=nb16[5][7]=nb16[0][7];
for (i=0; i<8; i++)
{
switch (nb16[1][i]/4)
{case 0 : break;
case 1 : nb16[1][i]=nb16[1][i]+4; break;
case 2 : nb16[1][i]=nb16[1][i]-4; break;
case 3 : break;}
switch (nb16[2][i]/4)
{case 0 : break;
case 1 : break;
case 2 : nb16[2][i]=nb16[2][i]+4; break;
case 3 : nb16[2][i]=nb16[2][i]-4; break;}
switch (nb16[3][i]/4)
{case 0 : break;
case 1 : nb16[3][i]=nb16[3][i]+4; break;
case 2 : nb16[3][i]=nb16[3][i]+4; break;
case 3 : nb16[3][i]=nb16[3][i]-8; break;}
switch (nb16[4][i]/4)
{case 0 : break;
case 1 : nb16[4][i]=nb16[4][i]+8; break;
case 2 : nb16[4][i]=nb16[4][i]-4; break;
case 3 : nb16[4][i]=nb16[4][i]-4; break;}
switch (nb16[5][i]/4)
{case 0 : break;
case 1 : nb16[5][i]=nb16[5][i]+8; break;
case 2 : break;
case 3 : nb16[5][i]=nb16[5][i]-8; break;}
}
for (j=1; j<6; j++) {nx=0;for (i=0; i<8; i++) {nx=16*nx + nb16[j][i];} if (nx<min) min=nx; }
if (nb == min) return (false); else return (true);
}*/
int main(int argc, char* argv[])
{
std::hash_map <std::vector<int>, std::pair<long long, int>, MyClass_Hasher > map;
// This number is the number of machines that run in parallel before each resynchronisation on the CPU.
// In principle, the larger it is the faster the run, but it must not exceed a certain limit, otherwise
// memory runs out and the program crashes.
// A safety margin is preferable, all the more so if the PC is running other things at the same time.
const int maxSimultaneously = 1024*100;
// Length of the print tape associated with each Turing machine.
const int tapeLength = 20;
// Allocate the data structures on the CPU.
int* nbIteration = (int*)malloc(maxSimultaneously*sizeof(int));
bool* machineEnded = (bool*)malloc(maxSimultaneously*sizeof(bool));
int* tapeSize = (int*) malloc(maxSimultaneously*sizeof(int));
int* printedTape = (int*)malloc(tapeLength*maxSimultaneously*sizeof(int));
int* d_nbIteration = NULL;
bool* d_machineEnded = NULL;
int* d_tapeSize = NULL;
int* d_printedTape = NULL;
// Allocate the data structures on the GPU.
cudaMalloc((void **)&d_nbIteration, maxSimultaneously*sizeof(int));
cudaMalloc((void **)&d_machineEnded, maxSimultaneously*sizeof(bool));
cudaMalloc((void **)&d_tapeSize, maxSimultaneously*sizeof(int));
cudaMalloc((void **)&d_printedTape, tapeLength*maxSimultaneously*sizeof(int));
long long numberMachinesStopped = 0;
// Note: for small tests, the number of machines to compute can be reduced....
long long limit = 4294967296;
for (long long currentMachineNum = 0; currentMachineNum < limit; ) {/*if (sym4(currentMachineNum)) continue;*/
int maxNbelements = maxSimultaneously;
if (currentMachineNum+maxSimultaneously > limit) {
long long tmp = limit - currentMachineNum;
maxNbelements = (int) tmp;
}
// threadsPerBlock and blocksPerGrid are two parameters required by CUDA; they can be adjusted depending
// on the graphics card.
int threadsPerBlock = 1024;
int blocksPerGrid =maxSimultaneously/threadsPerBlock;//(maxSimultaneously + threadsPerBlock - 1) / threadsPerBlock;
run_turing_machine<<<blocksPerGrid, threadsPerBlock>>>(currentMachineNum, d_machineEnded, d_nbIteration, d_printedTape, d_tapeSize, tapeLength, maxNbelements);
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
{
// CUDA error; the parameter values may be too high for the graphics card in use...
fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the data back from the graphics card into the host structures so it can be processed.
cudaMemcpy(nbIteration, d_nbIteration, maxSimultaneously*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(machineEnded, d_machineEnded, maxSimultaneously*sizeof(bool), cudaMemcpyDeviceToHost);
cudaMemcpy(tapeSize, d_tapeSize, maxSimultaneously*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(printedTape, d_printedTape, tapeLength*maxSimultaneously*sizeof(int), cudaMemcpyDeviceToHost);
// Loop over all elements.
for (int k = 0; k < maxNbelements; k++) {
// Check that the data for machine k is relevant (did the machine halt and write something?)
if (machineEnded[k] == true && tapeSize[k] >= 0) {
std::vector<int> infosToStore;
infosToStore.reserve(tapeSize[k]+1);
// Put the printed tape into a vector so it can be looked up in the hash table.
int* pointerBegin = &printedTape[k*tapeLength];
int* pointerEnd = &printedTape[k*tapeLength]+tapeSize[k]+1;
for (;pointerBegin != pointerEnd; ++pointerBegin) {
infosToStore.push_back(*pointerBegin);
}
long long curMachine = currentMachineNum+k;
std::hash_map <std::vector<int>, std::pair<long long, int>, MyClass_Hasher >::const_iterator mapIter = map.find(infosToStore);
if (mapIter == map.end()) {
// Result not found, so insert it.
map[infosToStore] = std::make_pair(curMachine, nbIteration[k]);
}
else {
// Result already present; insert only if the machine number is smaller than the one already stored.
if (curMachine < mapIter->second.first) {
map[infosToStore] = std::make_pair(curMachine, nbIteration[k]);
}
}
numberMachinesStopped++;
}
}
currentMachineNum += maxSimultaneously;
}
// Free the GPU memory
cudaFree(d_nbIteration);
cudaFree(d_machineEnded);
cudaFree(d_tapeSize);
cudaFree(d_printedTape);
// Free the CPU memory
free(nbIteration);
free(machineEnded);
free(tapeSize);
free(printedTape);
// See the explanation in the 3statesCpu project...
FILE* f = fopen("MT4_Cuda1.txt", "w");
fprintf(f, "Nombre de machines executees: %lld\n", limit);
fprintf(f, "Nombre de machines qui se sont arretee: %lld\n", numberMachinesStopped);
std::vector<std::pair<long long, std::pair<int, std::vector<int> > > > allMachines;
{
std::hash_map <std::vector<int>, std::pair<long long, int>, MyClass_Hasher >::const_iterator iter = map.begin();
std::hash_map <std::vector<int>, std::pair<long long, int>, MyClass_Hasher >::const_iterator end = map.end();
for (; iter != end; ++iter) {
allMachines.push_back(std::make_pair(iter->second.first, std::make_pair(iter->second.second, iter->first)));
}
}
std::sort(allMachines.begin(), allMachines.end());
{
std::vector<std::pair<long long, std::pair<int, std::vector<int> > > >::const_iterator iter = allMachines.begin();
std::vector<std::pair<long long, std::pair<int, std::vector<int> > > >::const_iterator end = allMachines.end();
for (; iter != end; ++iter) {
const std::vector<int>& vect = (iter->second.second);
fprintf(f, "{%lld, %d, {", iter->first, iter->second.first);
if (vect.size() > 0) {
std::vector<int>::const_reverse_iterator tapeIter = vect.rbegin();
std::vector<int>::const_reverse_iterator tapeEnd = vect.rend();
for (; tapeIter != tapeEnd-1; ++tapeIter) {
fprintf(f, "%d, ", *tapeIter);
}
fprintf(f, "%d", *tapeIter);
}
fprintf(f, "}}\n");
}
}
fclose(f);
std::cout << "process terminated" << std::endl;
std::cin.get(); // wait for a key press before exiting
}
|
b781b7cd19532d0c13fe60b8183d4f6eef6ec724.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "improc.h"
#include "gpukernels.h"
#include <cstdio>
using namespace cv;
using namespace std;
#define MAX_2D_THREADS_PER_BLOCK 32
#define MAX_THREADS_PER_BLOCK 1024
int main(int argc, char** argv)
{
char* imageName = argv[1];
char* templName = argv[2];
Mat image_rgb = imread(imageName, 1);
Mat image = imread(imageName, 0);
Mat templ = imread(templName, 0);
const int img_rows = image.rows;
const int img_cols = image.cols;
const int tmp_rows = templ.rows;
const int tmp_cols = templ.cols;
const int ker_rows = 15;
const int ker_cols = 15;
const int offset_rows = ker_rows / 2; // 3 -> 1, 4 -> 2, 5 -> 2, also the size of apron
const int offset_cols = ker_cols / 2; // 3 -> 1, 4 -> 2, 5 -> 2
/** Load the image using OpenCV */
double **src = img2Array(image);
double **T = img2Array(templ);
/** Gaussian filtering on CPU test */
double **smoothed_img = cudaMallocManaged2D(img_rows, img_cols);
double **gauss_kernel = getGaussianKernel(ker_rows,ker_cols,2,2);
conv(src, img_rows, img_cols, gauss_kernel, ker_rows, ker_cols, smoothed_img);
Mat res_smoothed = array2Img(smoothed_img, img_rows, img_cols);
imwrite( "GaussFiltering_result.jpg", res_smoothed);
/** Test on the GPU Gaussian Filtering Kernel on Global Memory */
// Show some related infomation regarding the GPU
int deviceCount;
hipGetDeviceCount(&deviceCount);
if (deviceCount == 0) {
fprintf(stderr, "error: no devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
int dev = 0;
hipSetDevice(dev);
hipDeviceProp_t devProps;
if (hipGetDeviceProperties(&devProps, dev) == 0)
{
printf("Using device %d:\n", dev);
printf("%s; Global Memory: %fGB; Shared Memory/block: %lu KB; Compute v%d.%d; Clock: %f GHz\n",
devProps.name, (float)devProps.totalGlobalMem / (1024*1024*1024),
devProps.sharedMemPerBlock/(1024), (int)devProps.major, (int)devProps.minor,
(float)devProps.clockRate/(1000*1000));
}
double **dstg = cudaMallocManaged2D(img_rows, img_cols);
// Block dim: 32 x 32
const unsigned num_threads_row = MAX_2D_THREADS_PER_BLOCK;
const unsigned num_threads_col = MAX_2D_THREADS_PER_BLOCK;
// Assign the number of blocks
const unsigned num_blocks_row = (img_rows + num_threads_row) / num_threads_row;
const unsigned num_blocks_col = (img_cols + num_threads_col) / num_threads_col;
const dim3 num_blocks (num_blocks_col, num_blocks_row);
const dim3 num_threads (num_threads_col, num_threads_row);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
// No shared memory
for(int i = 0; i < 100; ++i){
hipLaunchKernelGGL(( convGPUGlobal), dim3(num_blocks), dim3(num_threads) , 0, 0,
src, img_rows, img_cols, gauss_kernel, ker_rows, ker_cols, dstg);
}
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
fprintf(stdout, "Done Gaussian-Global on GPU.\n");
float elapsedTime;
hipEventElapsedTime(&elapsedTime, start, stop);
fprintf(stdout, "Time elapsed: %f ms\n", elapsedTime/100);
cv::Mat resg = array2Img(dstg, img_rows, img_cols);
imwrite( "Smoothed_Image_GPU.jpg", resg);
/** Test on the GPU Gaussian Filtering Kernel on Shared Memory */
double **dstgs = cudaMallocManaged2D(img_rows, img_cols);
// Block dim: H x (32 + 2 * Apron); Total number of threads < 1024
const unsigned tile_cols = MAX_2D_THREADS_PER_BLOCK + 2 * offset_cols;
const unsigned tile_rows = MAX_2D_THREADS_PER_BLOCK + 2 * offset_rows;
const unsigned num_threads_col_s = tile_cols; // blockDim.x
const unsigned num_threads_row_s = MAX_THREADS_PER_BLOCK / num_threads_col_s; // blockDim.y
// Assign the number of blocks
const unsigned num_blocks_col_s = (img_cols + MAX_2D_THREADS_PER_BLOCK) / MAX_2D_THREADS_PER_BLOCK;
const unsigned num_blocks_row_large_s = (img_rows + MAX_2D_THREADS_PER_BLOCK) / MAX_2D_THREADS_PER_BLOCK;
const unsigned sub_blocks_per_large = (tile_rows + num_threads_row_s) / num_threads_row_s;
const unsigned num_blocks_row_s = num_blocks_row_large_s * sub_blocks_per_large;
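// The 1024-thread cap keeps blockDim.y below the tile height, so several row sub-blocks are launched per
// 32-row output strip to cover the whole (32 + 2*apron)-row input tile.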
const dim3 num_blocks_s (num_blocks_col_s, num_blocks_row_s);
const dim3 num_threads_s (num_threads_col_s, num_threads_row_s);
const int TILE_BYTES = sizeof(double) * tile_cols * tile_rows;
const int KERN_BYTES = sizeof(double) * ker_cols * ker_rows;
hipEvent_t start_s, stop_s;
hipEventCreate(&start_s);
hipEventCreate(&stop_s);
hipEventRecord(start_s, 0);
for(int i = 0; i < 10; ++i){
hipLaunchKernelGGL(( convGPUShared), dim3(num_blocks_s), dim3(num_threads_s), TILE_BYTES + KERN_BYTES , 0,
src, img_rows, img_cols, gauss_kernel, ker_rows, ker_cols, dstgs);
}
hipEventRecord(stop_s, 0);
hipEventSynchronize(stop_s);
fprintf(stdout, "Done Gaussian-Shared on GPU.\n");
float elapsedTime_g;
hipEventElapsedTime(&elapsedTime_g, start_s, stop_s);
fprintf(stdout, "Time elapsed: %f ms\n", elapsedTime_g/10);
cv::Mat resgs = array2Img(dstgs, img_rows, img_cols);
imwrite( "Smoothed_Image_GPUs.jpg", resgs);
/** Test on 1D GPU Gaussian Filtering Kernels on Shared Memory */
double *gauss_1d_kernel = get1DGaussianKernel(ker_rows,2);
double **dstgr = cudaMallocManaged2D(img_rows, img_cols);
double **dstgc = cudaMallocManaged2D(img_rows, img_cols);
// For 1D Filtering: Share memory size: tile x 32; Thread block size: 32 x 32
// The number of blocks are also the same as the global one
const int TILE_BYTES_COLS = sizeof(double) * tile_cols * num_threads_row;
const int TILE_BYTES_ROWS = sizeof(double) * tile_rows * num_threads_col;
const int KERN_BYTES_1D = sizeof(double) * ker_rows;
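// Separable Gaussian: one column pass followed by one row pass costs O(k) per pixel instead of O(k^2) for the full 2D kernel.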
hipEvent_t start_1d, stop_1d;
hipEventCreate(&start_1d);
hipEventCreate(&stop_1d);
hipEventRecord(start_1d, 0);
for(int i = 0; i < 10; ++i){
hipLaunchKernelGGL(( convGPUCol), dim3(num_blocks), dim3(num_threads), TILE_BYTES_COLS + KERN_BYTES_1D , 0,
src, img_rows, img_cols, gauss_1d_kernel, ker_cols, dstgc);
hipLaunchKernelGGL(( convGPURow), dim3(num_blocks), dim3(num_threads), TILE_BYTES_ROWS + KERN_BYTES_1D , 0,
dstgc, img_rows, img_cols, gauss_1d_kernel, ker_rows, dstgr);
}
hipEventRecord(stop_1d, 0);
hipEventSynchronize(stop_1d);
fprintf(stdout, "Done Separate Gaussian-Shared on GPU.\n");
float elapsedTime_1d;
hipEventElapsedTime(&elapsedTime_1d, start_1d, stop_1d);
fprintf(stdout, "Time elapsed: %f ms\n", elapsedTime_1d/10);
cv::Mat resg1d = array2Img(dstgr, img_rows, img_cols);
imwrite( "Smoothed_Image_GPUs-1d.jpg", resg1d);
/** Double threshold test */
double lo = 0.008;
double hi = 0.08;
double **edge_map = cudaMallocManaged2D(img_rows, img_cols);
doubleThreshold(dstgr, img_rows, img_cols, lo, hi, edge_map);
Mat res_edge = array2Img(edge_map, img_rows, img_cols);
imwrite("edge_result.jpg", res_edge);
/** Distance transform test */
double **dist_map = cudaMallocManaged2D(img_rows, img_cols);
distTrans(src, img_rows, img_cols ,dist_map);
Mat res_dist = array2Img(dist_map, img_rows, img_cols);
imwrite("distTrans_result.jpg", res_dist);
/** Image dilation test */
double **dilated_img = cudaMallocManaged2D(img_rows, img_cols);
dilate(dist_map, img_rows, img_cols, 2, dilated_img);
Mat res_dilated = array2Img(dilated_img, img_rows, img_cols);
imwrite( "dilation_result.jpg", res_dilated);
/** Search matching test*/
double **matched_map = cudaMallocManaged2D(img_rows, img_cols);
printf("Search matching on CPU ");
conv(dilated_img, img_rows, img_cols, T, tmp_rows, tmp_cols, matched_map);
Mat res_matched = array2Img(matched_map, img_rows, img_cols);
imwrite( "search_result.jpg", res_matched);
hipEventCreate(&start_s);
hipEventCreate(&stop_s);
hipEventRecord(start_s, 0);
for(int i = 0; i < 10; ++i){
hipLaunchKernelGGL(( convGPUShared), dim3(num_blocks_s), dim3(num_threads_s), TILE_BYTES + KERN_BYTES , 0,
src, img_rows, img_cols, T, tmp_rows, tmp_cols, matched_map);
}
hipEventRecord(stop_s, 0);
hipEventSynchronize(stop_s);
hipEventElapsedTime(&elapsedTime_g, start_s, stop_s);
fprintf(stdout, "Search matching on GPU: %f ms\n", elapsedTime_g/10);
res_matched = array2Img(matched_map, img_rows, img_cols);
imwrite( "search_result.jpg", res_matched);
/** Non maximum supression test */
int t_rows = 10;
int t_cols = 10;
double p = 0.9;
double **nms_map = memAlloc2D(img_rows, img_cols);
nonMaxSupression(matched_map, img_rows, img_cols, t_rows, t_cols, p, nms_map);
Mat res_nms = array2Img(nms_map, img_rows, img_cols);
imwrite( "nms_result.jpg", res_nms);
/** Draw the matched result*/
drawBox(nms_map, img_rows, img_cols, tmp_rows, tmp_cols, image_rgb);
// Mat final_res = array2Img(src, img_rows, img_cols);
imwrite("../result/final_result.jpg",image_rgb);
return 0;
}
| b781b7cd19532d0c13fe60b8183d4f6eef6ec724.cu | #include "improc.h"
#include "gpukernels.h"
#include <cstdio>
using namespace cv;
using namespace std;
#define MAX_2D_THREADS_PER_BLOCK 32
#define MAX_THREADS_PER_BLOCK 1024
int main(int argc, char** argv)
{
char* imageName = argv[1];
char* templName = argv[2];
Mat image_rgb = imread(imageName, 1);
Mat image = imread(imageName, 0);
Mat templ = imread(templName, 0);
const int img_rows = image.rows;
const int img_cols = image.cols;
const int tmp_rows = templ.rows;
const int tmp_cols = templ.cols;
const int ker_rows = 15;
const int ker_cols = 15;
const int offset_rows = ker_rows / 2; // 3 -> 1, 4 -> 2, 5 -> 2, also the size of apron
const int offset_cols = ker_cols / 2; // 3 -> 1, 4 -> 2, 5 -> 2
/** Load the image using OpenCV */
double **src = img2Array(image);
double **T = img2Array(templ);
/** Gaussian filtering on CPU test */
double **smoothed_img = cudaMallocManaged2D(img_rows, img_cols);
double **gauss_kernel = getGaussianKernel(ker_rows,ker_cols,2,2);
conv(src, img_rows, img_cols, gauss_kernel, ker_rows, ker_cols, smoothed_img);
Mat res_smoothed = array2Img(smoothed_img, img_rows, img_cols);
imwrite( "GaussFiltering_result.jpg", res_smoothed);
/** Test on the GPU Gaussian Filtering Kernel on Global Memory */
// Show some related infomation regarding the GPU
int deviceCount;
cudaGetDeviceCount(&deviceCount);
if (deviceCount == 0) {
fprintf(stderr, "error: no devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
int dev = 0;
cudaSetDevice(dev);
cudaDeviceProp devProps;
if (cudaGetDeviceProperties(&devProps, dev) == 0)
{
printf("Using device %d:\n", dev);
printf("%s; Global Memory: %fGB; Shared Memory/block: %lu KB; Compute v%d.%d; Clock: %f GHz\n",
devProps.name, (float)devProps.totalGlobalMem / (1024*1024*1024),
devProps.sharedMemPerBlock/(1024), (int)devProps.major, (int)devProps.minor,
(float)devProps.clockRate/(1000*1000));
}
double **dstg = cudaMallocManaged2D(img_rows, img_cols);
// Block dim: 32 x 32
const unsigned num_threads_row = MAX_2D_THREADS_PER_BLOCK;
const unsigned num_threads_col = MAX_2D_THREADS_PER_BLOCK;
// Assign the number of blocks
const unsigned num_blocks_row = (img_rows + num_threads_row) / num_threads_row;
const unsigned num_blocks_col = (img_cols + num_threads_col) / num_threads_col;
const dim3 num_blocks (num_blocks_col, num_blocks_row);
const dim3 num_threads (num_threads_col, num_threads_row);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
// No shared memory
for(int i = 0; i < 100; ++i){
convGPUGlobal<<< num_blocks, num_threads >>>
(src, img_rows, img_cols, gauss_kernel, ker_rows, ker_cols, dstg);
}
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
fprintf(stdout, "Done Gaussian-Global on GPU.\n");
float elapsedTime;
cudaEventElapsedTime(&elapsedTime, start, stop);
fprintf(stdout, "Time elapsed: %f ms\n", elapsedTime/100);
cv::Mat resg = array2Img(dstg, img_rows, img_cols);
imwrite( "Smoothed_Image_GPU.jpg", resg);
/** Test on the GPU Gaussian Filtering Kernel on Shared Memory */
double **dstgs = cudaMallocManaged2D(img_rows, img_cols);
// Block dim: H x (32 + 2 * Apron); Total number of threads < 1024
const unsigned tile_cols = MAX_2D_THREADS_PER_BLOCK + 2 * offset_cols;
const unsigned tile_rows = MAX_2D_THREADS_PER_BLOCK + 2 * offset_rows;
const unsigned num_threads_col_s = tile_cols; // blockDim.x
const unsigned num_threads_row_s = MAX_THREADS_PER_BLOCK / num_threads_col_s; // blockDim.y
// Assign the number of blocks
const unsigned num_blocks_col_s = (img_cols + MAX_2D_THREADS_PER_BLOCK) / MAX_2D_THREADS_PER_BLOCK;
const unsigned num_blocks_row_large_s = (img_rows + MAX_2D_THREADS_PER_BLOCK) / MAX_2D_THREADS_PER_BLOCK;
const unsigned sub_blocks_per_large = (tile_rows + num_threads_row_s) / num_threads_row_s;
const unsigned num_blocks_row_s = num_blocks_row_large_s * sub_blocks_per_large;
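// The 1024-thread cap keeps blockDim.y below the tile height, so several row sub-blocks are launched per
// 32-row output strip to cover the whole (32 + 2*apron)-row input tile.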
const dim3 num_blocks_s (num_blocks_col_s, num_blocks_row_s);
const dim3 num_threads_s (num_threads_col_s, num_threads_row_s);
const int TILE_BYTES = sizeof(double) * tile_cols * tile_rows;
const int KERN_BYTES = sizeof(double) * ker_cols * ker_rows;
cudaEvent_t start_s, stop_s;
cudaEventCreate(&start_s);
cudaEventCreate(&stop_s);
cudaEventRecord(start_s, 0);
for(int i = 0; i < 10; ++i){
convGPUShared<<< num_blocks_s, num_threads_s, TILE_BYTES + KERN_BYTES >>>
(src, img_rows, img_cols, gauss_kernel, ker_rows, ker_cols, dstgs);
}
cudaEventRecord(stop_s, 0);
cudaEventSynchronize(stop_s);
fprintf(stdout, "Done Gaussian-Shared on GPU.\n");
float elapsedTime_g;
cudaEventElapsedTime(&elapsedTime_g, start_s, stop_s);
fprintf(stdout, "Time elapsed: %f ms\n", elapsedTime_g/10);
cv::Mat resgs = array2Img(dstgs, img_rows, img_cols);
imwrite( "Smoothed_Image_GPUs.jpg", resgs);
/** Test on 1D GPU Gaussian Filtering Kernels on Shared Memory */
double *gauss_1d_kernel = get1DGaussianKernel(ker_rows,2);
double **dstgr = cudaMallocManaged2D(img_rows, img_cols);
double **dstgc = cudaMallocManaged2D(img_rows, img_cols);
// For 1D Filtering: Share memory size: tile x 32; Thread block size: 32 x 32
// The number of blocks are also the same as the global one
const int TILE_BYTES_COLS = sizeof(double) * tile_cols * num_threads_row;
const int TILE_BYTES_ROWS = sizeof(double) * tile_rows * num_threads_col;
const int KERN_BYTES_1D = sizeof(double) * ker_rows;
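// Separable Gaussian: one column pass followed by one row pass costs O(k) per pixel instead of O(k^2) for the full 2D kernel.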
cudaEvent_t start_1d, stop_1d;
cudaEventCreate(&start_1d);
cudaEventCreate(&stop_1d);
cudaEventRecord(start_1d, 0);
for(int i = 0; i < 10; ++i){
convGPUCol<<< num_blocks, num_threads, TILE_BYTES_COLS + KERN_BYTES_1D >>>
(src, img_rows, img_cols, gauss_1d_kernel, ker_cols, dstgc);
convGPURow<<< num_blocks, num_threads, TILE_BYTES_ROWS + KERN_BYTES_1D >>>
(dstgc, img_rows, img_cols, gauss_1d_kernel, ker_rows, dstgr);
}
cudaEventRecord(stop_1d, 0);
cudaEventSynchronize(stop_1d);
fprintf(stdout, "Done Separate Gaussian-Shared on GPU.\n");
float elapsedTime_1d;
cudaEventElapsedTime(&elapsedTime_1d, start_1d, stop_1d);
fprintf(stdout, "Time elapsed: %f ms\n", elapsedTime_1d/10);
cv::Mat resg1d = array2Img(dstgr, img_rows, img_cols);
imwrite( "Smoothed_Image_GPUs-1d.jpg", resg1d);
/** Double threshold test */
double lo = 0.008;
double hi = 0.08;
double **edge_map = cudaMallocManaged2D(img_rows, img_cols);
doubleThreshold(dstgr, img_rows, img_cols, lo, hi, edge_map);
Mat res_edge = array2Img(edge_map, img_rows, img_cols);
imwrite("edge_result.jpg", res_edge);
/** Distance transform test */
double **dist_map = cudaMallocManaged2D(img_rows, img_cols);
distTrans(src, img_rows, img_cols ,dist_map);
Mat res_dist = array2Img(dist_map, img_rows, img_cols);
imwrite("distTrans_result.jpg", res_dist);
/** Image dilation test */
double **dilated_img = cudaMallocManaged2D(img_rows, img_cols);
dilate(dist_map, img_rows, img_cols, 2, dilated_img);
Mat res_dilated = array2Img(dilated_img, img_rows, img_cols);
imwrite( "dilation_result.jpg", res_dilated);
/** Search matching test*/
double **matched_map = cudaMallocManaged2D(img_rows, img_cols);
printf("Search matching on CPU ");
conv(dilated_img, img_rows, img_cols, T, tmp_rows, tmp_cols, matched_map);
Mat res_matched = array2Img(matched_map, img_rows, img_cols);
imwrite( "search_result.jpg", res_matched);
cudaEventCreate(&start_s);
cudaEventCreate(&stop_s);
cudaEventRecord(start_s, 0);
for(int i = 0; i < 10; ++i){
convGPUShared<<< num_blocks_s, num_threads_s, TILE_BYTES + KERN_BYTES >>>
(src, img_rows, img_cols, T, tmp_rows, tmp_cols, matched_map);
}
cudaEventRecord(stop_s, 0);
cudaEventSynchronize(stop_s);
cudaEventElapsedTime(&elapsedTime_g, start_s, stop_s);
fprintf(stdout, "Search matching on GPU: %f ms\n", elapsedTime_g/10);
res_matched = array2Img(matched_map, img_rows, img_cols);
imwrite( "search_result.jpg", res_matched);
/** Non maximum supression test */
int t_rows = 10;
int t_cols = 10;
double p = 0.9;
double **nms_map = memAlloc2D(img_rows, img_cols);
nonMaxSupression(matched_map, img_rows, img_cols, t_rows, t_cols, p, nms_map);
Mat res_nms = array2Img(nms_map, img_rows, img_cols);
imwrite( "nms_result.jpg", res_nms);
/** Draw the matched result*/
drawBox(nms_map, img_rows, img_cols, tmp_rows, tmp_cols, image_rgb);
// Mat final_res = array2Img(src, img_rows, img_cols);
imwrite("../result/final_result.jpg",image_rgb);
return 0;
}
|
64fa10967307418e60807c16a63387de584b62a8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "rw_cuda.h"
#include <ATen/hip/HIPContext.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include "utils.cuh"
#define THREADS 1024
#define BLOCKS(N) (N + THREADS - 1) / THREADS
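// uniform_sampling_kernel: one thread per start node; each step picks a uniformly random outgoing edge using the
// precomputed rand values, and stays in place (recording edge id -1) when the current node has no neighbours.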
__global__ void uniform_sampling_kernel(const int64_t *rowptr,
const int64_t *col,
const int64_t *start, const float *rand,
int64_t *n_out, int64_t *e_out,
const int64_t walk_length,
const int64_t numel) {
const int64_t thread_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_idx < numel) {
int64_t n_cur = start[thread_idx], e_cur, row_start, row_end, rnd;
n_out[thread_idx] = n_cur;
for (int64_t l = 0; l < walk_length; l++) {
row_start = rowptr[n_cur], row_end = rowptr[n_cur + 1];
if (row_end - row_start == 0) {
e_cur = -1;
} else {
rnd = int64_t(rand[l * numel + thread_idx] * (row_end - row_start));
e_cur = row_start + rnd;
n_cur = col[e_cur];
}
n_out[(l + 1) * numel + thread_idx] = n_cur;
e_out[l * numel + thread_idx] = e_cur;
}
}
}
__global__ void
rejection_sampling_kernel(unsigned int seed, const int64_t *rowptr,
const int64_t *col, const int64_t *start,
int64_t *n_out, int64_t *e_out,
const int64_t walk_length, const int64_t numel,
const double p, const double q) {
hiprandState_t state;
hiprand_init(seed, 0, 0, &state);
double max_prob = fmax(fmax(1. / p, 1.), 1. / q);
double prob_0 = 1. / p / max_prob;
double prob_1 = 1. / max_prob;
double prob_2 = 1. / q / max_prob;
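// node2vec-style rejection sampling: a candidate x, drawn uniformly from v's neighbours, is accepted with
// probability 1/p if it equals the previous node t, 1 if it is also a neighbour of t, and 1/q otherwise
// (all normalised by max_prob).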
const int64_t thread_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_idx < numel) {
int64_t t = start[thread_idx], v, x, e_cur, row_start, row_end;
n_out[thread_idx] = t;
row_start = rowptr[t], row_end = rowptr[t + 1];
if (row_end - row_start == 0) {
e_cur = -1;
v = t;
} else {
e_cur = row_start + (hiprand(&state) % (row_end - row_start));
v = col[e_cur];
}
n_out[numel + thread_idx] = v;
e_out[thread_idx] = e_cur;
for (int64_t l = 1; l < walk_length; l++) {
row_start = rowptr[v], row_end = rowptr[v + 1];
if (row_end - row_start == 0) {
e_cur = -1;
x = v;
} else if (row_end - row_start == 1) {
e_cur = row_start;
x = col[e_cur];
} else {
while (true) {
e_cur = row_start + (hiprand(&state) % (row_end - row_start));
x = col[e_cur];
double r = hiprand_uniform(&state); // (0, 1]
if (x == t && r < prob_0)
break;
bool is_neighbor = false;
row_start = rowptr[x], row_end = rowptr[x + 1];
for (int64_t i = row_start; i < row_end; i++) {
if (col[i] == t) {
is_neighbor = true;
break;
}
}
if (is_neighbor && r < prob_1)
break;
else if (r < prob_2)
break;
}
}
n_out[(l + 1) * numel + thread_idx] = x;
e_out[l * numel + thread_idx] = e_cur;
t = v;
v = x;
}
}
}
std::tuple<torch::Tensor, torch::Tensor>
random_walk_cuda(torch::Tensor rowptr, torch::Tensor col, torch::Tensor start,
int64_t walk_length, double p, double q) {
CHECK_CUDA(rowptr);
CHECK_CUDA(col);
CHECK_CUDA(start);
hipSetDevice(rowptr.get_device());
CHECK_INPUT(rowptr.dim() == 1);
CHECK_INPUT(col.dim() == 1);
CHECK_INPUT(start.dim() == 1);
auto n_out = torch::empty({walk_length + 1, start.size(0)}, start.options());
auto e_out = torch::empty({walk_length, start.size(0)}, start.options());
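// Each walk yields walk_length + 1 node ids and walk_length edge ids (-1 where no move was possible);
// the buffers are filled one column per walk and transposed before returning so each row is one walk.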
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
if (p == 1. && q == 1.) {
auto rand = torch::rand({start.size(0), walk_length},
start.options().dtype(torch::kFloat));
hipLaunchKernelGGL(( uniform_sampling_kernel), dim3(BLOCKS(start.numel())), dim3(THREADS), 0, stream,
rowptr.data_ptr<int64_t>(), col.data_ptr<int64_t>(),
start.data_ptr<int64_t>(), rand.data_ptr<float>(),
n_out.data_ptr<int64_t>(), e_out.data_ptr<int64_t>(), walk_length,
start.numel());
} else {
hipLaunchKernelGGL(( rejection_sampling_kernel), dim3(BLOCKS(start.numel())), dim3(THREADS), 0, stream,
time(NULL), rowptr.data_ptr<int64_t>(), col.data_ptr<int64_t>(),
start.data_ptr<int64_t>(), n_out.data_ptr<int64_t>(),
e_out.data_ptr<int64_t>(), walk_length, start.numel(), p, q);
}
return std::make_tuple(n_out.t().contiguous(), e_out.t().contiguous());
}
| 64fa10967307418e60807c16a63387de584b62a8.cu | #include "rw_cuda.h"
#include <ATen/cuda/CUDAContext.h>
#include <curand.h>
#include <curand_kernel.h>
#include "utils.cuh"
#define THREADS 1024
#define BLOCKS(N) (N + THREADS - 1) / THREADS
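// uniform_sampling_kernel: one thread per start node; each step picks a uniformly random outgoing edge using the
// precomputed rand values, and stays in place (recording edge id -1) when the current node has no neighbours.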
__global__ void uniform_sampling_kernel(const int64_t *rowptr,
const int64_t *col,
const int64_t *start, const float *rand,
int64_t *n_out, int64_t *e_out,
const int64_t walk_length,
const int64_t numel) {
const int64_t thread_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_idx < numel) {
int64_t n_cur = start[thread_idx], e_cur, row_start, row_end, rnd;
n_out[thread_idx] = n_cur;
for (int64_t l = 0; l < walk_length; l++) {
row_start = rowptr[n_cur], row_end = rowptr[n_cur + 1];
if (row_end - row_start == 0) {
e_cur = -1;
} else {
rnd = int64_t(rand[l * numel + thread_idx] * (row_end - row_start));
e_cur = row_start + rnd;
n_cur = col[e_cur];
}
n_out[(l + 1) * numel + thread_idx] = n_cur;
e_out[l * numel + thread_idx] = e_cur;
}
}
}
__global__ void
rejection_sampling_kernel(unsigned int seed, const int64_t *rowptr,
const int64_t *col, const int64_t *start,
int64_t *n_out, int64_t *e_out,
const int64_t walk_length, const int64_t numel,
const double p, const double q) {
curandState_t state;
curand_init(seed, 0, 0, &state);
double max_prob = fmax(fmax(1. / p, 1.), 1. / q);
double prob_0 = 1. / p / max_prob;
double prob_1 = 1. / max_prob;
double prob_2 = 1. / q / max_prob;
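// node2vec-style rejection sampling: a candidate x, drawn uniformly from v's neighbours, is accepted with
// probability 1/p if it equals the previous node t, 1 if it is also a neighbour of t, and 1/q otherwise
// (all normalised by max_prob).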
const int64_t thread_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_idx < numel) {
int64_t t = start[thread_idx], v, x, e_cur, row_start, row_end;
n_out[thread_idx] = t;
row_start = rowptr[t], row_end = rowptr[t + 1];
if (row_end - row_start == 0) {
e_cur = -1;
v = t;
} else {
e_cur = row_start + (curand(&state) % (row_end - row_start));
v = col[e_cur];
}
n_out[numel + thread_idx] = v;
e_out[thread_idx] = e_cur;
for (int64_t l = 1; l < walk_length; l++) {
row_start = rowptr[v], row_end = rowptr[v + 1];
if (row_end - row_start == 0) {
e_cur = -1;
x = v;
} else if (row_end - row_start == 1) {
e_cur = row_start;
x = col[e_cur];
} else {
while (true) {
e_cur = row_start + (curand(&state) % (row_end - row_start));
x = col[e_cur];
double r = curand_uniform(&state); // (0, 1]
if (x == t && r < prob_0)
break;
bool is_neighbor = false;
row_start = rowptr[x], row_end = rowptr[x + 1];
for (int64_t i = row_start; i < row_end; i++) {
if (col[i] == t) {
is_neighbor = true;
break;
}
}
if (is_neighbor && r < prob_1)
break;
else if (r < prob_2)
break;
}
}
n_out[(l + 1) * numel + thread_idx] = x;
e_out[l * numel + thread_idx] = e_cur;
t = v;
v = x;
}
}
}
std::tuple<torch::Tensor, torch::Tensor>
random_walk_cuda(torch::Tensor rowptr, torch::Tensor col, torch::Tensor start,
int64_t walk_length, double p, double q) {
CHECK_CUDA(rowptr);
CHECK_CUDA(col);
CHECK_CUDA(start);
cudaSetDevice(rowptr.get_device());
CHECK_INPUT(rowptr.dim() == 1);
CHECK_INPUT(col.dim() == 1);
CHECK_INPUT(start.dim() == 1);
auto n_out = torch::empty({walk_length + 1, start.size(0)}, start.options());
auto e_out = torch::empty({walk_length, start.size(0)}, start.options());
auto stream = at::cuda::getCurrentCUDAStream();
if (p == 1. && q == 1.) {
auto rand = torch::rand({start.size(0), walk_length},
start.options().dtype(torch::kFloat));
uniform_sampling_kernel<<<BLOCKS(start.numel()), THREADS, 0, stream>>>(
rowptr.data_ptr<int64_t>(), col.data_ptr<int64_t>(),
start.data_ptr<int64_t>(), rand.data_ptr<float>(),
n_out.data_ptr<int64_t>(), e_out.data_ptr<int64_t>(), walk_length,
start.numel());
} else {
rejection_sampling_kernel<<<BLOCKS(start.numel()), THREADS, 0, stream>>>(
time(NULL), rowptr.data_ptr<int64_t>(), col.data_ptr<int64_t>(),
start.data_ptr<int64_t>(), n_out.data_ptr<int64_t>(),
e_out.data_ptr<int64_t>(), walk_length, start.numel(), p, q);
}
return std::make_tuple(n_out.t().contiguous(), e_out.t().contiguous());
}
|
518adee03ce5e8ce3995f76951048ba7de20cf78.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef _PRESCAN_CU_
#define _PRESCAN_CU_
// includes, kernels
#include <assert.h>
#define NUM_BANKS 32
#define LOG_NUM_BANKS 5
// Lab4: You can use any other block size you wish.
#define BLOCK_SIZE 1024 //256
#define CONFLICT_FREE_OFFSET(index) ((index) >> LOG_NUM_BANKS)
// Lab4: Host Helper Functions (allocate your own data structure...)
// Lab4: Device Functions
// Lab4: Kernel Functions
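// Work-efficient (Blelloch) exclusive scan: each block scans a 2*BLOCK_SIZE chunk in shared memory
// (up-sweep, zero the last element, down-sweep) and optionally writes the chunk total to g_sums.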
__global__ void scan_workefficient(float *g_odata, float *g_idata, float *g_sums, int n, int blockID_offset)
{
// Dynamically allocated shared memory for scan kernels
extern __shared__ float temp[];
int thid = threadIdx.x;
int bid = blockIdx.x + blockID_offset;
int offset = 1;
// Cache the computational window in shared memory
int block_offset = BLOCK_SIZE*bid;
temp[2*thid] = g_idata[2*(thid+block_offset)];
temp[2*thid+1] = g_idata[2*(thid+block_offset)+1];
int ai = thid;
int bi = thid + (n/2);
// // compute spacing to avoid bank conflicts
// int bankOffsetA = CONFLICT_FREE_OFFSET(ai);
// int bankOffsetB = CONFLICT_FREE_OFFSET(bi);
// // Cache the computational window in shared memory
// temp[ai + bankOffsetA] = g_idata[ai];
// temp[bi + bankOffsetB] = g_idata[bi];
// build the sum in place up the tree
for (int d = n>>1; d > 0; d >>= 1)
{
__syncthreads();
if (thid < d)
{
int ai = offset*(2*thid+1)-1;
int bi = offset*(2*thid+2)-1;
// ai += ai/NUM_BANKS;
// bi += bi/NUM_BANKS;
temp[bi] += temp[ai];
}
offset *= 2;
}
// scan back down the tree
// clear the last element
if (thid == 0)
{
if(g_sums){
//g_sums[0] = 0;
g_sums[bid] = temp[n - 1];
}
temp[n - 1] = 0;
}
// traverse down the tree building the scan in place
for (int d = 1; d < n; d *= 2)
{
offset >>= 1;
__syncthreads();
if (thid < d)
{
int ai = offset*(2*thid+1)-1;
int bi = offset*(2*thid+2)-1;
// ai += ai/NUM_BANKS;
// bi += bi/NUM_BANKS;
float t = temp[ai];
temp[ai] = temp[bi];
temp[bi] += t;
}
}
__syncthreads();
// write results to global memory
g_odata[2*(thid+block_offset)] = temp[2*thid];
g_odata[2*(thid+block_offset)+1] = temp[2*thid+1];
}
__global__ void consolidate(float *g_odata, float *g_sums)
{
int thid = threadIdx.x;
// write results to global memory
g_odata[2*(thid+BLOCK_SIZE)] += g_sums[0];
g_odata[2*(thid+BLOCK_SIZE)+1] += g_sums[0];
// g_odata[2*(thid+BLOCK_SIZE)] += g_incr[1];
// g_odata[2*(thid+BLOCK_SIZE)+1] += g_incr[1];
}
__global__ void update(float *g_odata, float *g_incr, int blockID_offset)
{
int thid = threadIdx.x;
int bid = blockIdx.x + blockID_offset;
// Cache the computational window in shared memory
int block_offset = BLOCK_SIZE*bid;
g_odata[2*(thid+block_offset)] += g_incr[bid];
g_odata[2*(thid+block_offset)+1] += g_incr[bid];
}
// **===-------- Lab4: Modify the body of this function -----------===**
// You may need to make multiple kernel calls, make your own kernel
// function in this file, and then call them from here.
void prescanArray(float *outArray, float *inArray, float *sums, float *incr, float *incr_sums, float *incr_incr, int numElements)
{
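// Hierarchical scan: every 2*BLOCK_SIZE chunk is scanned and its total collected in sums; sums is then scanned
// itself (its per-block totals flowing through incr_sums and incr_incr); update() finally adds the scanned
// totals back into incr and then into every chunk of outArray.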
int num_blocks = numElements/(BLOCK_SIZE*2);
if(num_blocks==0){
num_blocks = 1;
}
if(num_blocks>2048){
num_blocks=2048;
}
//first scan individual blocks
hipLaunchKernelGGL(( scan_workefficient), dim3(num_blocks),dim3(BLOCK_SIZE),8192, 0, outArray, inArray, sums, BLOCK_SIZE*2, 0);
hipLaunchKernelGGL(( scan_workefficient), dim3(num_blocks),dim3(BLOCK_SIZE),8192, 0, outArray, inArray, sums, BLOCK_SIZE*2, 2048);
hipLaunchKernelGGL(( scan_workefficient), dim3(num_blocks),dim3(BLOCK_SIZE),8192, 0, outArray, inArray, sums, BLOCK_SIZE*2, 4096);
hipLaunchKernelGGL(( scan_workefficient), dim3(num_blocks),dim3(BLOCK_SIZE),8192, 0, outArray, inArray, sums, BLOCK_SIZE*2, 6144);
//at this point, sums is ready to be scanned
hipLaunchKernelGGL(( scan_workefficient), dim3(4),dim3(BLOCK_SIZE),8192, 0, incr, sums, incr_sums, num_blocks, 0);
hipLaunchKernelGGL(( scan_workefficient), dim3(1),dim3(BLOCK_SIZE),8192, 0, incr_incr, incr_sums, NULL, 4, 0);
hipLaunchKernelGGL(( update), dim3(4), dim3(BLOCK_SIZE), 8192, 0, incr, incr_incr, 0);
hipLaunchKernelGGL(( update), dim3(num_blocks), dim3(BLOCK_SIZE), 8192, 0, outArray, incr, 0);
hipLaunchKernelGGL(( update), dim3(num_blocks), dim3(BLOCK_SIZE), 8192, 0, outArray, incr, 2048);
hipLaunchKernelGGL(( update), dim3(num_blocks), dim3(BLOCK_SIZE), 8192, 0, outArray, incr, 4096);
hipLaunchKernelGGL(( update), dim3(num_blocks), dim3(BLOCK_SIZE), 8192, 0, outArray, incr, 6144);
}
// **===-----------------------------------------------------------===**
#endif // _PRESCAN_CU_
| 518adee03ce5e8ce3995f76951048ba7de20cf78.cu | #ifndef _PRESCAN_CU_
#define _PRESCAN_CU_
// includes, kernels
#include <assert.h>
#define NUM_BANKS 32
#define LOG_NUM_BANKS 5
// Lab4: You can use any other block size you wish.
#define BLOCK_SIZE 1024 //256
#define CONFLICT_FREE_OFFSET(index) ((index) >> LOG_NUM_BANKS)
// Lab4: Host Helper Functions (allocate your own data structure...)
// Lab4: Device Functions
// Lab4: Kernel Functions
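// Work-efficient (Blelloch) exclusive scan: each block scans a 2*BLOCK_SIZE chunk in shared memory
// (up-sweep, zero the last element, down-sweep) and optionally writes the chunk total to g_sums.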
__global__ void scan_workefficient(float *g_odata, float *g_idata, float *g_sums, int n, int blockID_offset)
{
// Dynamically allocated shared memory for scan kernels
extern __shared__ float temp[];
int thid = threadIdx.x;
int bid = blockIdx.x + blockID_offset;
int offset = 1;
// Cache the computational window in shared memory
int block_offset = BLOCK_SIZE*bid;
temp[2*thid] = g_idata[2*(thid+block_offset)];
temp[2*thid+1] = g_idata[2*(thid+block_offset)+1];
int ai = thid;
int bi = thid + (n/2);
// // compute spacing to avoid bank conflicts
// int bankOffsetA = CONFLICT_FREE_OFFSET(ai);
// int bankOffsetB = CONFLICT_FREE_OFFSET(bi);
// // Cache the computational window in shared memory
// temp[ai + bankOffsetA] = g_idata[ai];
// temp[bi + bankOffsetB] = g_idata[bi];
// build the sum in place up the tree
for (int d = n>>1; d > 0; d >>= 1)
{
__syncthreads();
if (thid < d)
{
int ai = offset*(2*thid+1)-1;
int bi = offset*(2*thid+2)-1;
// ai += ai/NUM_BANKS;
// bi += bi/NUM_BANKS;
temp[bi] += temp[ai];
}
offset *= 2;
}
// scan back down the tree
// clear the last element
if (thid == 0)
{
if(g_sums){
//g_sums[0] = 0;
g_sums[bid] = temp[n - 1];
}
temp[n - 1] = 0;
}
// traverse down the tree building the scan in place
for (int d = 1; d < n; d *= 2)
{
offset >>= 1;
__syncthreads();
if (thid < d)
{
int ai = offset*(2*thid+1)-1;
int bi = offset*(2*thid+2)-1;
// ai += ai/NUM_BANKS;
// bi += bi/NUM_BANKS;
float t = temp[ai];
temp[ai] = temp[bi];
temp[bi] += t;
}
}
__syncthreads();
// write results to global memory
g_odata[2*(thid+block_offset)] = temp[2*thid];
g_odata[2*(thid+block_offset)+1] = temp[2*thid+1];
}
__global__ void consolidate(float *g_odata, float *g_sums)
{
int thid = threadIdx.x;
// write results to global memory
g_odata[2*(thid+BLOCK_SIZE)] += g_sums[0];
g_odata[2*(thid+BLOCK_SIZE)+1] += g_sums[0];
// g_odata[2*(thid+BLOCK_SIZE)] += g_incr[1];
// g_odata[2*(thid+BLOCK_SIZE)+1] += g_incr[1];
}
__global__ void update(float *g_odata, float *g_incr, int blockID_offset)
{
int thid = threadIdx.x;
int bid = blockIdx.x + blockID_offset;
// Cache the computational window in shared memory
int block_offset = BLOCK_SIZE*bid;
g_odata[2*(thid+block_offset)] += g_incr[bid];
g_odata[2*(thid+block_offset)+1] += g_incr[bid];
}
// **===-------- Lab4: Modify the body of this function -----------===**
// You may need to make multiple kernel calls, make your own kernel
// function in this file, and then call them from here.
void prescanArray(float *outArray, float *inArray, float *sums, float *incr, float *incr_sums, float *incr_incr, int numElements)
{
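// Hierarchical scan: every 2*BLOCK_SIZE chunk is scanned and its total collected in sums; sums is then scanned
// itself (its per-block totals flowing through incr_sums and incr_incr); update() finally adds the scanned
// totals back into incr and then into every chunk of outArray.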
int num_blocks = numElements/(BLOCK_SIZE*2);
if(num_blocks==0){
num_blocks = 1;
}
if(num_blocks>2048){
num_blocks=2048;
}
//first scan individual blocks
scan_workefficient<<<num_blocks,BLOCK_SIZE,8192>>>(outArray, inArray, sums, BLOCK_SIZE*2, 0);
scan_workefficient<<<num_blocks,BLOCK_SIZE,8192>>>(outArray, inArray, sums, BLOCK_SIZE*2, 2048);
scan_workefficient<<<num_blocks,BLOCK_SIZE,8192>>>(outArray, inArray, sums, BLOCK_SIZE*2, 4096);
scan_workefficient<<<num_blocks,BLOCK_SIZE,8192>>>(outArray, inArray, sums, BLOCK_SIZE*2, 6144);
//at this point, sums is ready to be scanned
scan_workefficient<<<4,BLOCK_SIZE,8192>>>(incr, sums, incr_sums, num_blocks, 0);
scan_workefficient<<<1,BLOCK_SIZE,8192>>>(incr_incr, incr_sums, NULL, 4, 0);
update<<<4, BLOCK_SIZE, 8192>>>(incr, incr_incr, 0);
update<<<num_blocks, BLOCK_SIZE, 8192>>>(outArray, incr, 0);
update<<<num_blocks, BLOCK_SIZE, 8192>>>(outArray, incr, 2048);
update<<<num_blocks, BLOCK_SIZE, 8192>>>(outArray, incr, 4096);
update<<<num_blocks, BLOCK_SIZE, 8192>>>(outArray, incr, 6144);
}
// **===-----------------------------------------------------------===**
#endif // _PRESCAN_CU_
|
e0696c773bcc4596827f36539c0d6c846253d778.hip | // !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Context.h>
#include <ATen/Dispatch.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/JitLoops.cuh>
#include <ATen/native/hip/Pow.cuh>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/Pow.h>
#include <c10/core/Scalar.h>
namespace at { namespace native {
// Forward declare some unary kernels
void rsqrt_kernel_cuda(TensorIteratorBase& iter);
void sqrt_kernel_cuda(TensorIteratorBase& iter);
void reciprocal_kernel_cuda(TensorIteratorBase& iter);
namespace {
void pow_tensor_scalar_kernel(TensorIteratorBase& iter, const Scalar& exp_scalar);
template <typename scalar_t>
void pow_scalar_tensor_impl(TensorIteratorBase& iter, scalar_t base) {
gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t exp) -> scalar_t {
return pow_(base, exp);
});
}
template <typename value_t>
void pow_scalar_tensor_impl(TensorIteratorBase& iter, c10::complex<value_t> base) {
// For complex, thrust::pow uses the identity
// pow(a, b) = exp(log(a) * b)
const auto fct = ::log(base);
gpu_kernel(iter, [=]GPU_LAMBDA(c10::complex<value_t> exp) -> c10::complex<value_t> {
return ::exp(fct * exp);
});
}
/* complex<Half> support impl */
const char pow_scalar_base_name[] = "pow_scalar_base_kernel";
template <>
void pow_scalar_tensor_impl(TensorIteratorBase& iter, c10::complex<at::Half> base) {
using scalar_t = c10::complex<at::Half>;
using opmath_t = at::opmath_type<scalar_t>;
// For complex, thrust::pow uses the identity
// pow(a, b) = exp(log(a) * b)
const auto fct = ::log(opmath_t{base});
#if AT_USE_JITERATOR()
static const auto pow_kernel_string =
jiterator_stringify(template <typename T> T pow_scalar_base_kernel(T exp, T fct) {
return ::exp(fct * exp);
});
jitted_gpu_kernel<pow_scalar_base_name, scalar_t, scalar_t, 1>(
iter,
pow_kernel_string,
/*scalar_pos=*/at::cuda::jit::BinaryFuncVariant::NoScalar,
/*scalar_val=*/0,
/*extra_args=*/std::make_tuple(fct));
#else
gpu_kernel(iter, [=] GPU_LAMBDA(scalar_t exp) -> scalar_t {
return ::exp(fct * opmath_t{exp});
});
#endif
}
namespace {
#if AT_USE_JITERATOR()
/* complex<Half> support impl */
const char pow_name[] = "pow_kernel";
static const auto pow_kernel_string =
jiterator_stringify(template <typename T> T pow_kernel(T base, T exp) {
return ::pow(base, exp);
});
#endif
/* complex<Half> support impl */
void pow_chalf_tensor_scalar_impl(TensorIteratorBase& iter, const Scalar& exp_scalar) {
using scalar_t = c10::complex<at::Half>;
using opmath_t = at::opmath_type<scalar_t>;
auto exp = exp_scalar.to<opmath_t>();
#if AT_USE_JITERATOR()
jitted_gpu_kernel<pow_name, scalar_t, scalar_t, 1>(
iter,
pow_kernel_string,
/*scalar_pos=*/at::cuda::jit::BinaryFuncVariant::NoScalar,
/*scalar_val=*/0,
/*extra_args=*/std::make_tuple(exp));
#else
gpu_kernel(iter, [=] GPU_LAMBDA(scalar_t base) -> scalar_t {
return ::pow(opmath_t{base}, exp);
});
#endif
}
} // anonymous namespace
void pow_tensor_tensor_kernel(TensorIteratorBase& iter) {
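// When one operand is a CPU scalar it is lifted out of the iterator, so the operation reduces to a cheaper
// unary kernel over the remaining tensor.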
auto common_dtype = iter.common_dtype();
if (common_dtype == kComplexHalf) {
using scalar_t = c10::complex<at::Half>;
if (iter.is_cpu_scalar(1)) {
const auto base = iter.scalar_value<scalar_t>(1);
iter.remove_operand(1);
pow_scalar_tensor_impl(iter, base);
} else if (iter.is_cpu_scalar(2)) {
const auto exp = iter.scalar_value<scalar_t>(2);
iter.remove_operand(2);
pow_chalf_tensor_scalar_impl(iter, exp);
} else {
using opmath_t = at::opmath_type<scalar_t>;
TORCH_INTERNAL_ASSERT(!iter.is_cpu_scalar(1) && !iter.is_cpu_scalar(2));
#if AT_USE_JITERATOR()
jitted_gpu_kernel<pow_name, scalar_t, scalar_t, 2>(
iter, pow_kernel_string);
#else
gpu_kernel(iter, [=] GPU_LAMBDA(scalar_t base, scalar_t exp) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
return pow_(opmath_t{base}, opmath_t{exp});
});
#endif
}
} else {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(
kHalf, kBFloat16, iter.common_dtype(), "pow_cuda", [&] {
if (iter.is_cpu_scalar(1)) {
const auto base = iter.scalar_value<scalar_t>(1);
iter.remove_operand(1);
pow_scalar_tensor_impl(iter, base);
} else if (iter.is_cpu_scalar(2)) {
const auto exp = iter.scalar_value<scalar_t>(2);
iter.remove_operand(2);
pow_tensor_scalar_kernel(iter, exp);
} else {
gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t base, scalar_t exp) -> scalar_t {
return pow_(base, exp);
});
}
});
}
}
template<typename Base_type, typename Exp_type>
void pow_tensor_scalar_kernel_impl(TensorIteratorBase& iter,
Exp_type exp) {
const auto d_exp = static_cast<double>(exp);
// .5 (sqrt), -.5 (rsqrt) and -1 (reciprocal) specializations are handled
// in pow_tensor_scalar_kernel
if (d_exp == 2) {
gpu_kernel(iter, [=]GPU_LAMBDA(Base_type base) -> Base_type {
return base * base;
});
} else if (d_exp == 3) {
gpu_kernel(iter, [=]GPU_LAMBDA(Base_type base) -> Base_type {
return base * base * base;
});
} else if (d_exp == -2) {
gpu_kernel(iter, [=]GPU_LAMBDA(Base_type base) -> Base_type {
return 1.0 / (base * base);
});
} else {
gpu_kernel(iter, [=]GPU_LAMBDA(Base_type base) -> Base_type {
return pow_(base, exp);
});
}
}
void pow_tensor_scalar_kernel(TensorIteratorBase& iter, const Scalar& exp_scalar) {
// Dispatch to fast specialization for sqrt, rsqrt and reciprocal
if (!exp_scalar.isComplex()) {
if (exp_scalar.equal(.5)) {
return sqrt_kernel_cuda(iter);
} else if (exp_scalar.equal(-0.5)) {
return rsqrt_kernel_cuda(iter);
} else if (exp_scalar.equal(-1.0)) {
return reciprocal_kernel_cuda(iter);
}
}
if (isComplexType(iter.common_dtype()) || exp_scalar.isComplex()) {
if (iter.common_dtype() == kComplexHalf) {
using scalar_t = c10::complex<at::Half>;
pow_chalf_tensor_scalar_impl(iter, exp_scalar);
return;
}
AT_DISPATCH_COMPLEX_TYPES(iter.common_dtype(), "pow_cuda", [&]() {
const auto exp = exp_scalar.to<scalar_t>();
gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t base) -> scalar_t {
return pow_(base, exp);
});
});
} else if (isFloatingType(iter.common_dtype()) || exp_scalar.isIntegral(false)) {
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.common_dtype(), "pow_cuda", [&]() {
const auto exp = exp_scalar.to<scalar_t>();
pow_tensor_scalar_kernel_impl<scalar_t>(iter, exp);
});
} else {
TORCH_INTERNAL_ASSERT(false, "invalid combination of type in Pow function, common dtype:", iter.common_dtype(),
"exp is integral?", exp_scalar.isIntegral(false));
}
}
} // anonymous namespace
REGISTER_DISPATCH(pow_tensor_tensor_stub, &pow_tensor_tensor_kernel);
REGISTER_DISPATCH(pow_tensor_scalar_stub, &pow_tensor_scalar_kernel);
}} // namespace at::native
| e0696c773bcc4596827f36539c0d6c846253d778.cu | #define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Context.h>
#include <ATen/Dispatch.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Pow.cuh>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/Pow.h>
#include <c10/core/Scalar.h>
namespace at { namespace native {
// Forward declare some unary kernels
void rsqrt_kernel_cuda(TensorIteratorBase& iter);
void sqrt_kernel_cuda(TensorIteratorBase& iter);
void reciprocal_kernel_cuda(TensorIteratorBase& iter);
namespace {
void pow_tensor_scalar_kernel(TensorIteratorBase& iter, const Scalar& exp_scalar);
template <typename scalar_t>
void pow_scalar_tensor_impl(TensorIteratorBase& iter, scalar_t base) {
gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t exp) -> scalar_t {
return pow_(base, exp);
});
}
template <typename value_t>
void pow_scalar_tensor_impl(TensorIteratorBase& iter, c10::complex<value_t> base) {
// For complex, thrust::pow uses the identity
// pow(a, b) = exp(log(a) * b)
const auto fct = std::log(base);
gpu_kernel(iter, [=]GPU_LAMBDA(c10::complex<value_t> exp) -> c10::complex<value_t> {
return std::exp(fct * exp);
});
}
/* complex<Half> support impl */
const char pow_scalar_base_name[] = "pow_scalar_base_kernel";
template <>
void pow_scalar_tensor_impl(TensorIteratorBase& iter, c10::complex<at::Half> base) {
using scalar_t = c10::complex<at::Half>;
using opmath_t = at::opmath_type<scalar_t>;
// For complex, thrust::pow uses the identity
// pow(a, b) = exp(log(a) * b)
const auto fct = std::log(opmath_t{base});
#if AT_USE_JITERATOR()
static const auto pow_kernel_string =
jiterator_stringify(template <typename T> T pow_scalar_base_kernel(T exp, T fct) {
return std::exp(fct * exp);
});
jitted_gpu_kernel<pow_scalar_base_name, scalar_t, scalar_t, 1>(
iter,
pow_kernel_string,
/*scalar_pos=*/at::cuda::jit::BinaryFuncVariant::NoScalar,
/*scalar_val=*/0,
/*extra_args=*/std::make_tuple(fct));
#else
gpu_kernel(iter, [=] GPU_LAMBDA(scalar_t exp) -> scalar_t {
return std::exp(fct * opmath_t{exp});
});
#endif
}
namespace {
#if AT_USE_JITERATOR()
/* complex<Half> support impl */
const char pow_name[] = "pow_kernel";
static const auto pow_kernel_string =
jiterator_stringify(template <typename T> T pow_kernel(T base, T exp) {
return std::pow(base, exp);
});
#endif
/* complex<Half> support impl */
void pow_chalf_tensor_scalar_impl(TensorIteratorBase& iter, const Scalar& exp_scalar) {
using scalar_t = c10::complex<at::Half>;
using opmath_t = at::opmath_type<scalar_t>;
auto exp = exp_scalar.to<opmath_t>();
#if AT_USE_JITERATOR()
jitted_gpu_kernel<pow_name, scalar_t, scalar_t, 1>(
iter,
pow_kernel_string,
/*scalar_pos=*/at::cuda::jit::BinaryFuncVariant::NoScalar,
/*scalar_val=*/0,
/*extra_args=*/std::make_tuple(exp));
#else
gpu_kernel(iter, [=] GPU_LAMBDA(scalar_t base) -> scalar_t {
return std::pow(opmath_t{base}, exp);
});
#endif
}
} // anonymous namespace
void pow_tensor_tensor_kernel(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (common_dtype == kComplexHalf) {
using scalar_t = c10::complex<at::Half>;
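    // If one operand is a CPU scalar, lift it out of the iterator and dispatch to the cheaper scalar/tensor path.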
if (iter.is_cpu_scalar(1)) {
const auto base = iter.scalar_value<scalar_t>(1);
iter.remove_operand(1);
pow_scalar_tensor_impl(iter, base);
} else if (iter.is_cpu_scalar(2)) {
const auto exp = iter.scalar_value<scalar_t>(2);
iter.remove_operand(2);
pow_chalf_tensor_scalar_impl(iter, exp);
} else {
using opmath_t = at::opmath_type<scalar_t>;
TORCH_INTERNAL_ASSERT(!iter.is_cpu_scalar(1) && !iter.is_cpu_scalar(2));
#if AT_USE_JITERATOR()
jitted_gpu_kernel<pow_name, scalar_t, scalar_t, 2>(
iter, pow_kernel_string);
#else
gpu_kernel(iter, [=] GPU_LAMBDA(scalar_t base, scalar_t exp) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
return pow_(opmath_t{base}, opmath_t{exp});
});
#endif
}
} else {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(
kHalf, kBFloat16, iter.common_dtype(), "pow_cuda", [&] {
if (iter.is_cpu_scalar(1)) {
const auto base = iter.scalar_value<scalar_t>(1);
iter.remove_operand(1);
pow_scalar_tensor_impl(iter, base);
} else if (iter.is_cpu_scalar(2)) {
const auto exp = iter.scalar_value<scalar_t>(2);
iter.remove_operand(2);
pow_tensor_scalar_kernel(iter, exp);
} else {
gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t base, scalar_t exp) -> scalar_t {
return pow_(base, exp);
});
}
});
}
}
template<typename Base_type, typename Exp_type>
void pow_tensor_scalar_kernel_impl(TensorIteratorBase& iter,
Exp_type exp) {
const auto d_exp = static_cast<double>(exp);
// .5 (sqrt), -.5 (rsqrt) and -1 (reciprocal) specializations are handled
// in pow_tensor_scalar_kernel
if (d_exp == 2) {
gpu_kernel(iter, [=]GPU_LAMBDA(Base_type base) -> Base_type {
return base * base;
});
} else if (d_exp == 3) {
gpu_kernel(iter, [=]GPU_LAMBDA(Base_type base) -> Base_type {
return base * base * base;
});
} else if (d_exp == -2) {
gpu_kernel(iter, [=]GPU_LAMBDA(Base_type base) -> Base_type {
return 1.0 / (base * base);
});
} else {
gpu_kernel(iter, [=]GPU_LAMBDA(Base_type base) -> Base_type {
return pow_(base, exp);
});
}
}
void pow_tensor_scalar_kernel(TensorIteratorBase& iter, const Scalar& exp_scalar) {
// Dispatch to fast specialization for sqrt, rsqrt and reciprocal
if (!exp_scalar.isComplex()) {
if (exp_scalar.equal(.5)) {
return sqrt_kernel_cuda(iter);
} else if (exp_scalar.equal(-0.5)) {
return rsqrt_kernel_cuda(iter);
} else if (exp_scalar.equal(-1.0)) {
return reciprocal_kernel_cuda(iter);
}
}
if (isComplexType(iter.common_dtype()) || exp_scalar.isComplex()) {
if (iter.common_dtype() == kComplexHalf) {
using scalar_t = c10::complex<at::Half>;
pow_chalf_tensor_scalar_impl(iter, exp_scalar);
return;
}
AT_DISPATCH_COMPLEX_TYPES(iter.common_dtype(), "pow_cuda", [&]() {
const auto exp = exp_scalar.to<scalar_t>();
gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t base) -> scalar_t {
return pow_(base, exp);
});
});
} else if (isFloatingType(iter.common_dtype()) || exp_scalar.isIntegral(false)) {
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.common_dtype(), "pow_cuda", [&]() {
const auto exp = exp_scalar.to<scalar_t>();
pow_tensor_scalar_kernel_impl<scalar_t>(iter, exp);
});
} else {
TORCH_INTERNAL_ASSERT(false, "invalid combination of type in Pow function, common dtype:", iter.common_dtype(),
"exp is integral?", exp_scalar.isIntegral(false));
}
}
} // anonymous namespace
REGISTER_DISPATCH(pow_tensor_tensor_stub, &pow_tensor_tensor_kernel);
REGISTER_DISPATCH(pow_tensor_scalar_stub, &pow_tensor_scalar_kernel);
}} // namespace at::native
|
a8d2132803a31b4bc437b5a53ca9e6705002792c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) GeneralOCR. All rights reserved
#include "common_cuda_helper.hpp"
#include "roi_align_cuda_kernel.cuh"
template <typename scalar_t>
void TRTRoIAlignForwardCUDAKernelLauncher(
const scalar_t* input, const scalar_t* rois, scalar_t* output,
scalar_t* argmax_y, scalar_t* argmax_x, int output_size, int channels,
int height, int width, int aligned_height, int aligned_width,
scalar_t spatial_scale, int sampling_ratio, int pool_mode, bool aligned,
hipStream_t stream) {
hipLaunchKernelGGL(( roi_align_forward_cuda_kernel<scalar_t>)
, dim3(GET_BLOCKS(output_size)), dim3(THREADS_PER_BLOCK), 0, stream,
output_size, input, rois, output, argmax_y, argmax_x, aligned_height,
aligned_width, static_cast<scalar_t>(spatial_scale), sampling_ratio,
pool_mode, aligned, channels, height, width);
}
void TRTRoIAlignForwardCUDAKernelLauncher_float(
const float* input, const float* rois, float* output, float* argmax_y,
float* argmax_x, int output_size, int channels, int height, int width,
int aligned_height, int aligned_width, float spatial_scale,
int sampling_ratio, int pool_mode, bool aligned, hipStream_t stream) {
TRTRoIAlignForwardCUDAKernelLauncher<float>(
input, rois, output, argmax_y, argmax_x, output_size, channels, height,
width, aligned_height, aligned_width, spatial_scale, sampling_ratio,
pool_mode, aligned, stream);
}
| a8d2132803a31b4bc437b5a53ca9e6705002792c.cu | // Copyright (c) GeneralOCR. All rights reserved
#include "common_cuda_helper.hpp"
#include "roi_align_cuda_kernel.cuh"
template <typename scalar_t>
void TRTRoIAlignForwardCUDAKernelLauncher(
const scalar_t* input, const scalar_t* rois, scalar_t* output,
scalar_t* argmax_y, scalar_t* argmax_x, int output_size, int channels,
int height, int width, int aligned_height, int aligned_width,
scalar_t spatial_scale, int sampling_ratio, int pool_mode, bool aligned,
cudaStream_t stream) {
roi_align_forward_cuda_kernel<scalar_t>
<<<GET_BLOCKS(output_size), THREADS_PER_BLOCK, 0, stream>>>(
output_size, input, rois, output, argmax_y, argmax_x, aligned_height,
aligned_width, static_cast<scalar_t>(spatial_scale), sampling_ratio,
pool_mode, aligned, channels, height, width);
}
void TRTRoIAlignForwardCUDAKernelLauncher_float(
const float* input, const float* rois, float* output, float* argmax_y,
float* argmax_x, int output_size, int channels, int height, int width,
int aligned_height, int aligned_width, float spatial_scale,
int sampling_ratio, int pool_mode, bool aligned, cudaStream_t stream) {
TRTRoIAlignForwardCUDAKernelLauncher<float>(
input, rois, output, argmax_y, argmax_x, output_size, channels, height,
width, aligned_height, aligned_width, spatial_scale, sampling_ratio,
pool_mode, aligned, stream);
}
|
cf09c446de791062674c73c11594847fdd7f20fc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef WITH_CUDA
#include "core/context_cuda.h"
#include "utils/cast.h"
#include "utils/op_kernel.h"
namespace dragon {
namespace kernel {
/*! ArgMax <T = ?, Device = CUDA> */
template <typename T>
__global__ void _ArgMax(
const int nthreads,
const int inner_dim,
const int axis_dim,
const T* x,
int64_t* indices,
T* values) {
CUDA_1D_KERNEL_LOOP(y_idx, nthreads) {
const int oix = y_idx / inner_dim;
const int iix = y_idx % inner_dim;
const T* X = x + (oix * axis_dim * inner_dim + iix);
T max_val = X[0], val; int64_t max_idx = 0;
for (int j = 1; j < axis_dim; ++j) {
val = X[j * inner_dim];
if (val > max_val) {
max_val = val;
max_idx = j;
}
}
indices[y_idx] = max_idx;
if (values) values[y_idx] = max_val;
}
}
/*! ArgMax <T = float16, Device = CUDA> */
__global__ void _ArgMaxHalf(
const int nthreads,
const int inner_dim,
const int axis_dim,
const half* x,
int64_t* indices,
half* values) {
CUDA_1D_KERNEL_LOOP(y_idx, nthreads) {
#if __CUDA_ARCH__ >= 530
const int oix = y_idx / inner_dim;
const int iix = y_idx % inner_dim;
const half* X = x + (oix * axis_dim * inner_dim + iix);
half max_val = X[0], val; int64_t max_idx = 0;
for (int j = 1; j < axis_dim; ++j) {
val = X[j * inner_dim];
if (__hgt(val, max_val)) {
max_val = val;
max_idx = j;
}
}
indices[y_idx] = max_idx;
if (values) values[y_idx] = max_val;
#endif
}
}
/*! ArgMin <T = ?, Device = CUDA> */
template <typename T>
__global__ void _ArgMin(
const int nthreads,
const int inner_dim,
const int axis_dim,
const T* x,
int64_t* indices,
T* values) {
CUDA_1D_KERNEL_LOOP(y_idx, nthreads) {
const int oix = y_idx / inner_dim;
const int iix = y_idx % inner_dim;
const T* X = x + (oix * axis_dim * inner_dim + iix);
T min_val = X[0], val; int64_t min_idx = 0;
for (int j = 1; j < axis_dim; ++j) {
val = X[j * inner_dim];
if (val < min_val) {
min_val = val;
min_idx = j;
}
}
indices[y_idx] = min_idx;
if (values) values[y_idx] = min_val;
}
}
/*! ArgMin <T = float16, Device = CUDA> */
__global__ void _ArgMinHalf(
const int nthreads,
const int inner_dim,
const int axis_dim,
const half* x,
int64_t* indices,
half* values) {
CUDA_1D_KERNEL_LOOP(y_idx, nthreads) {
#if __CUDA_ARCH__ >= 530
const int oix = y_idx / inner_dim;
const int iix = y_idx % inner_dim;
const half* X = x + (oix * axis_dim * inner_dim + iix);
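        // Note: despite the max_* names, __hlt below makes these track the running minimum.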
half max_val = X[0], val; int64_t max_idx = 0;
for (int j = 1; j < axis_dim; ++j) {
val = X[j * inner_dim];
if (__hlt(val, max_val)) {
max_val = val;
max_idx = j;
}
}
indices[y_idx] = max_idx;
if (values) values[y_idx] = max_val;
#endif
}
}
/*! Kernel Launchers */
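// One thread handles one (outer, inner) slice and scans the axis dimension sequentially; only top_k == 1 is supported.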
#define DEFINE_ARGREDUCE_KERNEL_LAUNCHER(name, T) \
template<> void name<T, CUDAContext>( \
const int outer_dim, \
const int inner_dim, \
const int axis_dim, \
const int top_k, \
const T* x, \
int64_t* indices, \
T* values, \
CUDAContext* ctx) { \
CHECK_EQ(top_k, 1) << "\nRequired top_k == 1."; \
auto nthreads = outer_dim * inner_dim; \
_##name<T> \
<< < CUDA_BLOCKS(nthreads), CUDA_THREADS, \
0, ctx->cuda_stream() >> > \
(nthreads, inner_dim, axis_dim, \
x, indices, values); \
}
DEFINE_ARGREDUCE_KERNEL_LAUNCHER(ArgMax, bool);
DEFINE_ARGREDUCE_KERNEL_LAUNCHER(ArgMax, int8_t);
DEFINE_ARGREDUCE_KERNEL_LAUNCHER(ArgMax, uint8_t);
DEFINE_ARGREDUCE_KERNEL_LAUNCHER(ArgMax, int);
DEFINE_ARGREDUCE_KERNEL_LAUNCHER(ArgMax, int64_t);
DEFINE_ARGREDUCE_KERNEL_LAUNCHER(ArgMax, float);
DEFINE_ARGREDUCE_KERNEL_LAUNCHER(ArgMax, double);
DEFINE_ARGREDUCE_KERNEL_LAUNCHER(ArgMin, bool);
DEFINE_ARGREDUCE_KERNEL_LAUNCHER(ArgMin, int8_t);
DEFINE_ARGREDUCE_KERNEL_LAUNCHER(ArgMin, uint8_t);
DEFINE_ARGREDUCE_KERNEL_LAUNCHER(ArgMin, int);
DEFINE_ARGREDUCE_KERNEL_LAUNCHER(ArgMin, int64_t);
DEFINE_ARGREDUCE_KERNEL_LAUNCHER(ArgMin, float);
DEFINE_ARGREDUCE_KERNEL_LAUNCHER(ArgMin, double);
template<> void ArgMax<float16, CUDAContext>(
const int outer_dim,
const int inner_dim,
const int axis_dim,
const int top_k,
const float16* x,
int64_t* indices,
float16* values,
CUDAContext* ctx) {
CHECK_EQ(top_k, 1) << "\nRequired top_k == 1.";
auto nthreads = outer_dim * inner_dim;
_ArgMaxHalf
<< < CUDA_BLOCKS(nthreads), CUDA_THREADS,
0, ctx->cuda_stream() >> >
(nthreads, inner_dim, axis_dim,
reinterpret_cast<const half*>(x), indices,
reinterpret_cast<half*>(values));
}
template<> void ArgMin<float16, CUDAContext>(
const int outer_dim,
const int inner_dim,
const int axis_dim,
const int top_k,
const float16* x,
int64_t* indices,
float16* values,
CUDAContext* ctx) {
CHECK_EQ(top_k, 1) << "\nRequired top_k == 1.";
auto nthreads = outer_dim * inner_dim;
_ArgMinHalf
<< < CUDA_BLOCKS(nthreads), CUDA_THREADS,
0, ctx->cuda_stream() >> >
(nthreads, inner_dim, axis_dim,
reinterpret_cast<const half*>(x), indices,
reinterpret_cast<half*>(values));
}
#undef DEFINE_ARGREDUCE_KERNEL_LAUNCHER
} // namespace kernel
} // namespace dragon
#endif // WITH_CUDA | cf09c446de791062674c73c11594847fdd7f20fc.cu | #ifdef WITH_CUDA
#include "core/context_cuda.h"
#include "utils/cast.h"
#include "utils/op_kernel.h"
namespace dragon {
namespace kernel {
/*! ArgMax <T = ?, Device = CUDA> */
template <typename T>
__global__ void _ArgMax(
const int nthreads,
const int inner_dim,
const int axis_dim,
const T* x,
int64_t* indices,
T* values) {
CUDA_1D_KERNEL_LOOP(y_idx, nthreads) {
const int oix = y_idx / inner_dim;
const int iix = y_idx % inner_dim;
const T* X = x + (oix * axis_dim * inner_dim + iix);
T max_val = X[0], val; int64_t max_idx = 0;
for (int j = 1; j < axis_dim; ++j) {
val = X[j * inner_dim];
if (val > max_val) {
max_val = val;
max_idx = j;
}
}
indices[y_idx] = max_idx;
if (values) values[y_idx] = max_val;
}
}
/*! ArgMax <T = float16, Device = CUDA> */
__global__ void _ArgMaxHalf(
const int nthreads,
const int inner_dim,
const int axis_dim,
const half* x,
int64_t* indices,
half* values) {
CUDA_1D_KERNEL_LOOP(y_idx, nthreads) {
#if __CUDA_ARCH__ >= 530
const int oix = y_idx / inner_dim;
const int iix = y_idx % inner_dim;
const half* X = x + (oix * axis_dim * inner_dim + iix);
half max_val = X[0], val; int64_t max_idx = 0;
for (int j = 1; j < axis_dim; ++j) {
val = X[j * inner_dim];
if (__hgt(val, max_val)) {
max_val = val;
max_idx = j;
}
}
indices[y_idx] = max_idx;
if (values) values[y_idx] = max_val;
#endif
}
}
/*! ArgMin <T = ?, Device = CUDA> */
template <typename T>
__global__ void _ArgMin(
const int nthreads,
const int inner_dim,
const int axis_dim,
const T* x,
int64_t* indices,
T* values) {
CUDA_1D_KERNEL_LOOP(y_idx, nthreads) {
const int oix = y_idx / inner_dim;
const int iix = y_idx % inner_dim;
const T* X = x + (oix * axis_dim * inner_dim + iix);
T min_val = X[0], val; int64_t min_idx = 0;
for (int j = 1; j < axis_dim; ++j) {
val = X[j * inner_dim];
if (val < min_val) {
min_val = val;
min_idx = j;
}
}
indices[y_idx] = min_idx;
if (values) values[y_idx] = min_val;
}
}
/*! ArgMin <T = float16, Device = CUDA> */
__global__ void _ArgMinHalf(
const int nthreads,
const int inner_dim,
const int axis_dim,
const half* x,
int64_t* indices,
half* values) {
CUDA_1D_KERNEL_LOOP(y_idx, nthreads) {
#if __CUDA_ARCH__ >= 530
const int oix = y_idx / inner_dim;
const int iix = y_idx % inner_dim;
const half* X = x + (oix * axis_dim * inner_dim + iix);
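        // Note: despite the max_* names, __hlt below makes these track the running minimum.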
half max_val = X[0], val; int64_t max_idx = 0;
for (int j = 1; j < axis_dim; ++j) {
val = X[j * inner_dim];
if (__hlt(val, max_val)) {
max_val = val;
max_idx = j;
}
}
indices[y_idx] = max_idx;
if (values) values[y_idx] = max_val;
#endif
}
}
/*! Kernel Launchers */
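// One thread handles one (outer, inner) slice and scans the axis dimension sequentially; only top_k == 1 is supported.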
#define DEFINE_ARGREDUCE_KERNEL_LAUNCHER(name, T) \
template<> void name<T, CUDAContext>( \
const int outer_dim, \
const int inner_dim, \
const int axis_dim, \
const int top_k, \
const T* x, \
int64_t* indices, \
T* values, \
CUDAContext* ctx) { \
CHECK_EQ(top_k, 1) << "\nRequired top_k == 1."; \
auto nthreads = outer_dim * inner_dim; \
_##name<T> \
<< < CUDA_BLOCKS(nthreads), CUDA_THREADS, \
0, ctx->cuda_stream() >> > \
(nthreads, inner_dim, axis_dim, \
x, indices, values); \
}
DEFINE_ARGREDUCE_KERNEL_LAUNCHER(ArgMax, bool);
DEFINE_ARGREDUCE_KERNEL_LAUNCHER(ArgMax, int8_t);
DEFINE_ARGREDUCE_KERNEL_LAUNCHER(ArgMax, uint8_t);
DEFINE_ARGREDUCE_KERNEL_LAUNCHER(ArgMax, int);
DEFINE_ARGREDUCE_KERNEL_LAUNCHER(ArgMax, int64_t);
DEFINE_ARGREDUCE_KERNEL_LAUNCHER(ArgMax, float);
DEFINE_ARGREDUCE_KERNEL_LAUNCHER(ArgMax, double);
DEFINE_ARGREDUCE_KERNEL_LAUNCHER(ArgMin, bool);
DEFINE_ARGREDUCE_KERNEL_LAUNCHER(ArgMin, int8_t);
DEFINE_ARGREDUCE_KERNEL_LAUNCHER(ArgMin, uint8_t);
DEFINE_ARGREDUCE_KERNEL_LAUNCHER(ArgMin, int);
DEFINE_ARGREDUCE_KERNEL_LAUNCHER(ArgMin, int64_t);
DEFINE_ARGREDUCE_KERNEL_LAUNCHER(ArgMin, float);
DEFINE_ARGREDUCE_KERNEL_LAUNCHER(ArgMin, double);
template<> void ArgMax<float16, CUDAContext>(
const int outer_dim,
const int inner_dim,
const int axis_dim,
const int top_k,
const float16* x,
int64_t* indices,
float16* values,
CUDAContext* ctx) {
CHECK_EQ(top_k, 1) << "\nRequired top_k == 1.";
auto nthreads = outer_dim * inner_dim;
_ArgMaxHalf
<< < CUDA_BLOCKS(nthreads), CUDA_THREADS,
0, ctx->cuda_stream() >> >
(nthreads, inner_dim, axis_dim,
reinterpret_cast<const half*>(x), indices,
reinterpret_cast<half*>(values));
}
template<> void ArgMin<float16, CUDAContext>(
const int outer_dim,
const int inner_dim,
const int axis_dim,
const int top_k,
const float16* x,
int64_t* indices,
float16* values,
CUDAContext* ctx) {
CHECK_EQ(top_k, 1) << "\nRequired top_k == 1.";
auto nthreads = outer_dim * inner_dim;
_ArgMinHalf
<< < CUDA_BLOCKS(nthreads), CUDA_THREADS,
0, ctx->cuda_stream() >> >
(nthreads, inner_dim, axis_dim,
reinterpret_cast<const half*>(x), indices,
reinterpret_cast<half*>(values));
}
#undef DEFINE_ARGREDUCE_KERNEL_LAUNCHER
} // namespace kernel
} // namespace dragon
#endif // WITH_CUDA |
2733ec5895bac6967f17dd1328fa7eec48f642bc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel5_plus_2_right;
int xdim0_update_halo_kernel5_plus_2_right_h = -1;
__constant__ int ydim0_update_halo_kernel5_plus_2_right;
int ydim0_update_halo_kernel5_plus_2_right_h = -1;
__constant__ int xdim1_update_halo_kernel5_plus_2_right;
int xdim1_update_halo_kernel5_plus_2_right_h = -1;
__constant__ int ydim1_update_halo_kernel5_plus_2_right;
int ydim1_update_halo_kernel5_plus_2_right_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel5_plus_2_right*(y)+xdim0_update_halo_kernel5_plus_2_right*ydim0_update_halo_kernel5_plus_2_right*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel5_plus_2_right*(y)+xdim1_update_halo_kernel5_plus_2_right*ydim1_update_halo_kernel5_plus_2_right*(z))
//user function
__device__
inline void update_halo_kernel5_plus_2_right_gpu(double *vol_flux_z, double *mass_flux_z, const int* fields) {
if(fields[FIELD_VOL_FLUX_Z] == 1) vol_flux_z[OPS_ACC0(0,0,0)] = (vol_flux_z[OPS_ACC0(-2,0,0)]);
if(fields[FIELD_MASS_FLUX_Z] == 1) mass_flux_z[OPS_ACC1(0,0,0)] = (mass_flux_z[OPS_ACC1(-2,0,0)]);
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel5_plus_2_right(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_update_halo_kernel5_plus_2_right + idx_z * 1*1 * xdim0_update_halo_kernel5_plus_2_right * ydim0_update_halo_kernel5_plus_2_right;
arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_update_halo_kernel5_plus_2_right + idx_z * 1*1 * xdim1_update_halo_kernel5_plus_2_right * ydim1_update_halo_kernel5_plus_2_right;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel5_plus_2_right_gpu(arg0, arg1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel5_plus_2_right(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel5_plus_2_right_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[3] = { arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,3,range,91)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(91,"update_halo_kernel5_plus_2_right");
OPS_kernels[91].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
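  // Convert the global iteration range to indices local to this MPI sub-block, clipping to the owned region;
  // the range may only spill past the block at physical boundaries (MPI_PROC_NULL neighbours).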
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel5_plus_2_right_h || ydim0 != ydim0_update_halo_kernel5_plus_2_right_h || xdim1 != xdim1_update_halo_kernel5_plus_2_right_h || ydim1 != ydim1_update_halo_kernel5_plus_2_right_h) {
hipMemcpyToSymbol( xdim0_update_halo_kernel5_plus_2_right, &xdim0, sizeof(int) );
xdim0_update_halo_kernel5_plus_2_right_h = xdim0;
hipMemcpyToSymbol( ydim0_update_halo_kernel5_plus_2_right, &ydim0, sizeof(int) );
ydim0_update_halo_kernel5_plus_2_right_h = ydim0;
hipMemcpyToSymbol( xdim1_update_halo_kernel5_plus_2_right, &xdim1, sizeof(int) );
xdim1_update_halo_kernel5_plus_2_right_h = xdim1;
hipMemcpyToSymbol( ydim1_update_halo_kernel5_plus_2_right, &ydim1, sizeof(int) );
ydim1_update_halo_kernel5_plus_2_right_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[91].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
hipLaunchKernelGGL(( ops_update_halo_kernel5_plus_2_right), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[91].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[91].mpi_time += t2-t1;
OPS_kernels[91].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[91].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel5_plus_2_right(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 91;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 91;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg*)malloc(3*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel5_plus_2_right_execute;
if (OPS_diags > 1) {
ops_timing_realloc(91,"update_halo_kernel5_plus_2_right");
}
ops_enqueue_kernel(desc);
}
#endif
| 2733ec5895bac6967f17dd1328fa7eec48f642bc.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel5_plus_2_right;
int xdim0_update_halo_kernel5_plus_2_right_h = -1;
__constant__ int ydim0_update_halo_kernel5_plus_2_right;
int ydim0_update_halo_kernel5_plus_2_right_h = -1;
__constant__ int xdim1_update_halo_kernel5_plus_2_right;
int xdim1_update_halo_kernel5_plus_2_right_h = -1;
__constant__ int ydim1_update_halo_kernel5_plus_2_right;
int ydim1_update_halo_kernel5_plus_2_right_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel5_plus_2_right*(y)+xdim0_update_halo_kernel5_plus_2_right*ydim0_update_halo_kernel5_plus_2_right*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel5_plus_2_right*(y)+xdim1_update_halo_kernel5_plus_2_right*ydim1_update_halo_kernel5_plus_2_right*(z))
//user function
__device__
inline void update_halo_kernel5_plus_2_right_gpu(double *vol_flux_z, double *mass_flux_z, const int* fields) {
if(fields[FIELD_VOL_FLUX_Z] == 1) vol_flux_z[OPS_ACC0(0,0,0)] = (vol_flux_z[OPS_ACC0(-2,0,0)]);
if(fields[FIELD_MASS_FLUX_Z] == 1) mass_flux_z[OPS_ACC1(0,0,0)] = (mass_flux_z[OPS_ACC1(-2,0,0)]);
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel5_plus_2_right(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_update_halo_kernel5_plus_2_right + idx_z * 1*1 * xdim0_update_halo_kernel5_plus_2_right * ydim0_update_halo_kernel5_plus_2_right;
arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_update_halo_kernel5_plus_2_right + idx_z * 1*1 * xdim1_update_halo_kernel5_plus_2_right * ydim1_update_halo_kernel5_plus_2_right;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel5_plus_2_right_gpu(arg0, arg1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel5_plus_2_right(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel5_plus_2_right_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[3] = { arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,3,range,91)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(91,"update_halo_kernel5_plus_2_right");
OPS_kernels[91].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
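  // Convert the global iteration range to indices local to this MPI sub-block, clipping to the owned region;
  // the range may only spill past the block at physical boundaries (MPI_PROC_NULL neighbours).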
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel5_plus_2_right_h || ydim0 != ydim0_update_halo_kernel5_plus_2_right_h || xdim1 != xdim1_update_halo_kernel5_plus_2_right_h || ydim1 != ydim1_update_halo_kernel5_plus_2_right_h) {
cudaMemcpyToSymbol( xdim0_update_halo_kernel5_plus_2_right, &xdim0, sizeof(int) );
xdim0_update_halo_kernel5_plus_2_right_h = xdim0;
cudaMemcpyToSymbol( ydim0_update_halo_kernel5_plus_2_right, &ydim0, sizeof(int) );
ydim0_update_halo_kernel5_plus_2_right_h = ydim0;
cudaMemcpyToSymbol( xdim1_update_halo_kernel5_plus_2_right, &xdim1, sizeof(int) );
xdim1_update_halo_kernel5_plus_2_right_h = xdim1;
cudaMemcpyToSymbol( ydim1_update_halo_kernel5_plus_2_right, &ydim1, sizeof(int) );
ydim1_update_halo_kernel5_plus_2_right_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[91].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
ops_update_halo_kernel5_plus_2_right<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[91].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[91].mpi_time += t2-t1;
OPS_kernels[91].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[91].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel5_plus_2_right(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 91;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 91;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg*)malloc(3*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel5_plus_2_right_execute;
if (OPS_diags > 1) {
ops_timing_realloc(91,"update_halo_kernel5_plus_2_right");
}
ops_enqueue_kernel(desc);
}
#endif
|
3bd3d7ec7fdb1f1bdee476d17c831c6a8dc8ce37.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "core/providers/cuda/cuda_common.h"
#include "core/providers/cuda/cu_inc/common.cuh"
#include "contrib_ops/cuda/bert/add_bias_transpose.h"
#include "contrib_ops/cuda/bert/rotary_embedding_util.h"
using namespace onnxruntime::cuda;
namespace onnxruntime {
namespace contrib {
namespace cuda {
template <typename T>
__global__ void AddBiasTransposeTrt(const T* input, const T* biases, T* output) {
// Format 2 for TensorRT fused attention (N*H <= 1024)
// Input: BxSxMxNxH
// Output: BxSxNxMxH
// B is batch_size, S is sequence_length, M is number of matrices, N is num_heads, H is head_size
// This kernel could support hidden size up to 4 * 1024 when T is Half4 and input is half.
int n = threadIdx.y;
int s = blockIdx.x;
int b = blockIdx.y;
int m = blockIdx.z; // matrix id
const int H = blockDim.x;
const int N = blockDim.y;
const int S = gridDim.x;
const int M = gridDim.z;
const int NH = N * H;
const int offset = (b * S + s) * M * NH;
const int in_offset = offset + m * NH + n * H;
const int out_offset = offset + (n * M + m) * H;
const int h = threadIdx.x;
if (h < H) {
output[out_offset + h] = input[in_offset + h] + biases[m * NH + n * H + h];
}
}
template <typename T>
__global__ void AddBiasTransposeTrtLarge(const int head_size, const T* input, const T* biases, T* output) {
// Format 2 for TensorRT fused attention (N*H > 1024)
int n = threadIdx.y;
int s = blockIdx.x;
int b = blockIdx.y;
int m = blockIdx.z;
const int stride = blockDim.x;
const int H = head_size;
const int N = blockDim.y;
const int S = gridDim.x;
const int M = gridDim.z;
const int NH = N * H;
const int offset = (b * S + s) * M * NH;
const int in_offset = offset + m * NH + n * H;
const int out_offset = offset + (n * M + m) * H;
int h = threadIdx.x;
while (h < H) {
output[out_offset + h] = input[in_offset + h] + biases[m * NH + n * H + h];
h += stride;
}
}
template <typename T>
__global__ void AddBiasTransposeTrt(const T* query, const T* key, const T* value, const T* biases, T* output) {
// Separated Q/K/V inputs for TensorRT fused attention (N*H <= 1024)
// Q: BxSxNxH
// K: BxSxNxH
// V: BxSxNxH
// Output: BxSxNxMxH
// B is batch_size, S is sequence_length, M is number of matrices (3), N is num_heads, H is head_size
int n = threadIdx.y;
int s = blockIdx.x;
int b = blockIdx.y;
int m = blockIdx.z; // matrix id
const int H = blockDim.x;
const int N = blockDim.y;
const int S = gridDim.x;
const int M = gridDim.z;
const T* input = (m == 0 ? query : (m == 1 ? key : value));
const int NH = N * H;
const int in_offset = (b * S + s) * NH + n * H;
const int out_offset = (b * S + s) * M * NH + (n * M + m) * H;
const int h = threadIdx.x;
if (h < H) {
output[out_offset + h] = input[in_offset + h] + biases[m * NH + n * H + h];
}
}
template <typename T>
__global__ void AddBiasTransposeTrtLarge(const int head_size,
const T* query, const T* key, const T* value, const T* biases, T* output) {
// Separated Q/K/V inputs for TensorRT fused attention (N*H > 1024)
int n = threadIdx.y;
int s = blockIdx.x;
int b = blockIdx.y;
int m = blockIdx.z; // matrix id
const int stride = blockDim.x;
const int H = head_size;
const int N = blockDim.y;
const int S = gridDim.x;
const int M = gridDim.z;
const T* input = (m == 0 ? query : (m == 1 ? key : value));
const int NH = N * H;
const int in_offset = (b * S + s) * NH + n * H;
const int out_offset = (b * S + s) * M * NH + (n * M + m) * H;
int h = threadIdx.x;
  while (h < H) {
output[out_offset + h] = input[in_offset + h] + biases[m * NH + n * H + h];
h += stride;
}
}
template <typename T>
__global__ void AddBiasTransposeTrtKV(const T* key, const T* value, const T* biases, T* output) {
// Separated K/V inputs for TensorRT fused cross attention (N*H <= 1024)
// K: BxSxNxH
// V: BxSxNxH
// Output: BxSxNxMxH (packed KV, requires H = H_v)
// B is batch_size, S is sequence_length, M is number of matrices (2), N is num_heads, H is head_size
int n = threadIdx.y;
int s = blockIdx.x;
int b = blockIdx.y;
int m = blockIdx.z; // matrix id
const int H = blockDim.x;
const int N = blockDim.y;
const int S = gridDim.x;
const int NH = N * H;
const int in_offset = (b * S + s) * NH + n * H;
const T* input = (m == 0 ? key : value);
constexpr int M = 2;
const int out_offset = (b * S + s) * M * NH + (n * M + m) * H;
const int h = threadIdx.x;
if (h < H) {
output[out_offset + h] = input[in_offset + h] + biases[(m + 1) * NH + n * H + h];
}
}
template <typename T>
__global__ void AddBiasTransposeTrtKVLarge(const int head_size,
const T* key, const T* value, const T* biases,
T* output) {
// Separated K/V inputs for TensorRT fused cross attention (N*H > 1024)
int n = threadIdx.y;
int s = blockIdx.x;
int b = blockIdx.y;
int m = blockIdx.z; // matrix id
const int stride = blockDim.x;
const int H = head_size;
const int N = blockDim.y;
const int S = gridDim.x;
const int NH = N * H;
const int in_offset = (b * S + s) * NH + n * H;
const T* input = (m == 0 ? key : value);
constexpr int M = 2;
const int out_offset = (b * S + s) * M * NH + (n * M + m) * H;
int h = threadIdx.x;
while (h < H) {
output[out_offset + h] = input[in_offset + h] + biases[(m + 1) * NH + n * H + h];
h += stride;
}
}
template <typename T>
__global__ void AddBiasTransposeQKV(int M, const T* input, const T* biases, T* output, T* qkv_add_bias) {
// Format 1 for unfused attention, or fused causal attention
// Input: BxSxMxNxH
// Output: MxBxNxSxH
// qkv_add_bias: BxSxMxNxH
// B is batch_size, S is sequence_length, M is number of matrices, N is num_heads, H is head_size
int n = threadIdx.y;
int s = blockIdx.x;
int b = blockIdx.y;
int m = blockIdx.z; // matrix id
const int head_size = blockDim.x;
const int num_heads = blockDim.y;
const int sequence_length = gridDim.x;
const int batch_size = gridDim.y;
const int H = head_size;
const int NH = num_heads * head_size;
const int NHS = NH * sequence_length;
int in_offset = n * head_size + (m + s * M) * NH + b * NHS * M;
const int out_offset = s * head_size + n * sequence_length * H + b * NHS + m * NHS * batch_size;
const int h = threadIdx.x;
if (h < head_size) {
output[out_offset + h] = input[in_offset + h] + biases[m * NH + n * H + h];
if (nullptr != qkv_add_bias) {
qkv_add_bias[in_offset + h] = input[in_offset + h] + biases[m * NH + n * H + h];
}
}
}
#ifndef USE_ROCM
template <typename T>
__global__ void AddBiasTransposeQKV(int M, const T* input, const T* biases, T* output, T* qkv_add_bias,
const int rotary_embedding_dim, const int head_size, const int step,
const int format) {
// AddBiasTransposeQKV with rotary embedding
// Format 1 for unfused attention, or fused causal attention
// Input: BxSxMxNxH
// Output: MxBxNxSxH
// qkv_add_bias: BxSxMxNxH
// Format 2 for fused TRT attention
// Input: BxSxMxNxH
// Output: BxSxNxMxH
// qkv_add_bias: BxSxMxNxH
// Format 3 for cutlass memory efficient attention
// Input: BxSxMxNxH
// Output: MxBxSxNxH
// B is batch_size, S is sequence_length, M is number of matrices, N is num_heads, H is head_size
int n = blockIdx.y;
int s = blockIdx.x;
int b = blockIdx.z;
const int seq_len = (gridDim.x == step) ? s : step;
const int num_heads = gridDim.y;
const int sequence_length = gridDim.x;
const int batch_size = gridDim.z;
const int H = head_size;
const int NH = num_heads * head_size;
const int NHS = NH * sequence_length;
constexpr int vec_size = Vec_t<T>::size;
using Vec_t = typename Vec_t<T>::Type;
extern __shared__ __align__(sizeof(float2)) char smem_[];
int tidx = threadIdx.x;
const int head_idx = tidx * vec_size;
if (head_idx < head_size) {
const bool is_masked = head_idx >= head_size;
const int input_offset_base = n * head_size + (s * M) * NH + b * NHS * M;
const int src_q_idx = input_offset_base + head_idx;
const int src_k_idx = input_offset_base + NH + head_idx;
const int src_v_idx = input_offset_base + 2 * NH + head_idx;
Vec_t q, k, v;
Vec_t q_bias, k_bias, v_bias;
if (!is_masked) {
q = *reinterpret_cast<const Vec_t*>(&input[src_q_idx]);
k = *reinterpret_cast<const Vec_t*>(&input[src_k_idx]);
v = *reinterpret_cast<const Vec_t*>(&input[src_v_idx]);
q_bias = *reinterpret_cast<const Vec_t*>(&biases[n * H + head_idx]);
k_bias = *reinterpret_cast<const Vec_t*>(&biases[NH + n * H + head_idx]);
v_bias = *reinterpret_cast<const Vec_t*>(&biases[2 * NH + n * H + head_idx]);
}
q = add_vec(q, q_bias);
k = add_vec(k, k_bias);
v = add_vec(v, v_bias);
const bool do_rotary = !is_masked && vec_size * tidx < rotary_embedding_dim;
T* q_smem = reinterpret_cast<T*>(smem_);
T* k_smem = q_smem + rotary_embedding_dim;
const int half_rotary_dim = rotary_embedding_dim / 2;
const int half_idx = (head_idx) / half_rotary_dim;
const int intra_half_idx = (head_idx) % half_rotary_dim;
const int smem_pitch = half_rotary_dim;
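    // Stage q/k through shared memory: the transposed read below groups the element pairs that
    // apply_rotary_embedding mixes, and the transposed write restores the original layout afterwards.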
if (do_rotary) {
*reinterpret_cast<Vec_t*>(q_smem + half_idx * smem_pitch + intra_half_idx) = q;
*reinterpret_cast<Vec_t*>(k_smem + half_idx * smem_pitch + intra_half_idx) = k;
}
__syncthreads();
const int transpose_idx = half_idx * (half_rotary_dim / 2) + intra_half_idx / 2;
constexpr int tidx_factor = vec_size / 2;
if (do_rotary) {
vec_from_smem_transpose(q, q_smem, transpose_idx, smem_pitch);
vec_from_smem_transpose(k, k_smem, transpose_idx, smem_pitch);
apply_rotary_embedding(q, k, transpose_idx / tidx_factor, rotary_embedding_dim, seq_len);
write_smem_transpose(q, q_smem, transpose_idx, smem_pitch);
write_smem_transpose(k, k_smem, transpose_idx, smem_pitch);
}
__syncthreads();
if (do_rotary) {
q = *reinterpret_cast<Vec_t*>(q_smem + half_idx * smem_pitch + intra_half_idx);
k = *reinterpret_cast<Vec_t*>(k_smem + half_idx * smem_pitch + intra_half_idx);
}
int dest_q_idx;
int dest_k_idx;
int dest_v_idx;
// Format 1
if (format == 1) {
const int output_offset_base = s * head_size + n * sequence_length * H + b * NHS;
dest_q_idx = output_offset_base + head_idx;
dest_k_idx = output_offset_base + NHS * batch_size + head_idx;
dest_v_idx = output_offset_base + 2 * NHS * batch_size + head_idx;
}
// Format 2
if (format == 2) {
const int output_offset_base = M * (b * NHS + s * NH + n * H);
dest_q_idx = output_offset_base + head_idx;
dest_k_idx = output_offset_base + H + head_idx;
dest_v_idx = output_offset_base + 2 * H + head_idx;
}
// Format 3
if (format == 3) {
const int output_offset_base = n * H + s * NH + b * NHS;
dest_q_idx = output_offset_base + head_idx;
dest_k_idx = output_offset_base + NHS * batch_size + head_idx;
dest_v_idx = output_offset_base + 2 * NHS * batch_size + head_idx;
}
if (!is_masked) {
*reinterpret_cast<Vec_t*>(&output[dest_q_idx]) = q;
*reinterpret_cast<Vec_t*>(&output[dest_k_idx]) = k;
*reinterpret_cast<Vec_t*>(&output[dest_v_idx]) = v;
if (nullptr != qkv_add_bias) {
*reinterpret_cast<Vec_t*>(&qkv_add_bias[src_q_idx]) = q;
*reinterpret_cast<Vec_t*>(&qkv_add_bias[src_k_idx]) = k;
*reinterpret_cast<Vec_t*>(&qkv_add_bias[src_v_idx]) = v;
}
}
}
}
#endif
// This overload assumes 3 matrices (Q, K, V) in total.
template <typename T>
__global__ void AddBiasTransposeQKV(const T* input, const T* biases, T* output, int v_head_size) {
// Format 1 for unfused attention
// Input: BxSx(NxH + NxH + NxH_v) (Packed QKV where K and V has different hidden sizes)
// Output: BxNxSxH + BxNxSxH + BxNxSxH_v
// B is batch_size, S is sequence_length, N is num_heads, H is qk_head_size, H_v is v_head_size
int n = threadIdx.y; // head_num_id
int s = blockIdx.x; // sequence_id
int b = blockIdx.y; // batch_id
int m = blockIdx.z; // matrix id (Q=0, K=1, V=2)
const int h = threadIdx.x; // head_element_id
const int qk_head_size = blockDim.x;
const int num_heads = blockDim.y;
const int sequence_length = gridDim.x;
const int batch_size = gridDim.y;
const int head_size = (m == 2 ? v_head_size : qk_head_size);
const int total_head_size = num_heads * (qk_head_size + qk_head_size + v_head_size);
int in_offset;
int out_offset;
int bias_offset;
in_offset = b * (total_head_size * sequence_length) + // B
s * (total_head_size) + // S
m * (qk_head_size * num_heads) + // M
n * head_size + // N
h; // H
out_offset = m * (num_heads * qk_head_size * sequence_length * batch_size) + // M
b * (num_heads * head_size * sequence_length) + // B
n * (sequence_length * head_size) + // N
s * (head_size) + // S
h; // H
bias_offset = m * (num_heads * qk_head_size) + // M
n * (head_size) + // N
h; // H
if (h < head_size) {
output[out_offset] = input[in_offset] + biases[bias_offset];
}
}
template <typename T>
__global__ void AddBiasTransposeQKVLarge(const int head_size, const T* input, const T* biases, T* output,
T* qkv_add_bias, const int M) {
// Format 1 for unfused attention (N*H > 1024), or fused causal attention
// Input: BxSxMxNxH (Packed QKV)
// Output: MxBxNxSxH
// qkv_add_bias: BxSxMxNxH
int n = threadIdx.y;
int s = blockIdx.x;
int b = blockIdx.y;
int m = blockIdx.z; // matrix id
const int stride = blockDim.x;
const int num_heads = blockDim.y;
const int sequence_length = gridDim.x;
const int batch_size = gridDim.y;
const int H = head_size;
const int NH = num_heads * H;
const int NHS = NH * sequence_length;
int in_offset = n * H + (m + s * M) * NH + b * NHS * M;
const int out_offset = s * H + n * sequence_length * H + b * NHS + m * NHS * batch_size;
int h = threadIdx.x;
while (h < H) {
output[out_offset + h] = input[in_offset + h] + biases[m * NH + n * H + h];
if (nullptr != qkv_add_bias) {
qkv_add_bias[in_offset + h] = input[in_offset + h] + biases[m * NH + n * H + h];
}
h += stride;
}
}
template <typename T>
__global__ void AddBiasTransposeCutlass(const T* input, const T* biases, T* output, int v_head_size) {
// Format 3 for cutlass memory efficient attention
// Input: BxSx(NxH + NxH + NxH_v) (Packed QKV where K and V has different hidden sizes)
// Output: BxNxSxH + BxNxSxH + BxNxSxH_v
// B is batch_size, S is sequence_length, N is num_heads, H is qk_head_size, H_v is v_head_size
int n = threadIdx.y; // head_num_id
int s = blockIdx.x; // sequence_id
int b = blockIdx.y; // batch_id
int m = blockIdx.z; // matrix id (Q=0, K=1, V=2)
const int h = threadIdx.x; // head_element_id
const int qk_head_size = blockDim.x;
const int num_heads = blockDim.y;
const int sequence_length = gridDim.x;
const int batch_size = gridDim.y;
const int head_size = (m == 2 ? v_head_size : qk_head_size);
const int total_head_size = num_heads * (qk_head_size + qk_head_size + v_head_size);
int in_offset;
int out_offset;
int bias_offset;
in_offset = b * (total_head_size * sequence_length) + // B
s * (total_head_size) + // S
m * (qk_head_size * num_heads) + // M
n * head_size + // N
h; // H
out_offset = m * (num_heads * qk_head_size * sequence_length * batch_size) + // M
b * (num_heads * head_size * sequence_length) + // B
s * (num_heads * head_size) + // S
n * (head_size) + // N
h; // H
bias_offset = m * (num_heads * qk_head_size) + // M
n * (head_size) + // N
h; // H
if (h < head_size) {
output[out_offset] = input[in_offset] + biases[bias_offset];
}
}
template <typename T>
__global__ void AddBiasUnpack(int M, const T* input, const T* biases, T* output) {
// Format 4 to unpack TRT packed input format for memory efficient attention.
// Input: BxSxNxMxH
// Output: MxBxSxNxH
// B is batch_size, S is sequence_length, M is number of matrices, N is num_heads, H is head_size
int n = threadIdx.y;
int s = blockIdx.x;
int b = blockIdx.y;
int m = blockIdx.z; // matrix id
const int head_size = blockDim.x;
const int num_heads = blockDim.y;
const int sequence_length = gridDim.x;
const int batch_size = gridDim.y;
const int H = head_size;
const int NH = num_heads * head_size;
const int NHS = NH * sequence_length;
int in_offset = m * head_size + n * M * H + (s * NH + b * NHS) * M;
const int out_offset = n * head_size + s * NH + b * NHS + m * NHS * batch_size;
const int h = threadIdx.x;
if (h < head_size) {
if (biases != nullptr) {
output[out_offset + h] = input[in_offset + h] + biases[m * NH + n * H + h];
} else {
output[out_offset + h] = input[in_offset + h];
}
}
}
template <typename T>
__global__ void AddBiasTransposeCutlass(int M, const T* input, const T* biases, T* output) {
// Format 3 for cutlass memory efficient attention
// Input: BxSxMxNxH
// Output: MxBxSxNxH
// B is batch_size, S is sequence_length, M is number of matrices, N is num_heads, H is head_size
int n = threadIdx.y;
int s = blockIdx.x;
int b = blockIdx.y;
int m = blockIdx.z; // matrix id
const int head_size = blockDim.x;
const int num_heads = blockDim.y;
const int sequence_length = gridDim.x;
const int batch_size = gridDim.y;
const int H = head_size;
const int NH = num_heads * head_size;
const int NHS = NH * sequence_length;
int in_offset = n * head_size + (m + s * M) * NH + b * NHS * M;
const int out_offset = n * head_size + s * NH + b * NHS + m * NHS * batch_size;
const int h = threadIdx.x;
if (h < head_size) {
output[out_offset + h] = input[in_offset + h] + biases[m * NH + n * H + h];
}
}
template <typename T>
__global__ void AddBiasTransposeCutlassLarge(const int head_size, const T* input, const T* biases, T* output,
const int M) {
// Format 3 for cutlass memory efficient attention
// Input: BxSxMxNxH (Packed QKV)
// Output: MxBxSxNxH
int n = threadIdx.y;
int s = blockIdx.x;
int b = blockIdx.y;
int m = blockIdx.z; // matrix id
const int stride = blockDim.x;
const int num_heads = blockDim.y;
const int sequence_length = gridDim.x;
const int batch_size = gridDim.y;
const int H = head_size;
const int NH = num_heads * H;
const int NHS = NH * sequence_length;
int in_offset = n * H + (m + s * M) * NH + b * NHS * M;
const int out_offset = n * H + s * NH + b * NHS + m * NHS * batch_size;
int h = threadIdx.x;
while (h < H) {
output[out_offset + h] = input[in_offset + h] + biases[m * NH + n * H + h];
h += stride;
}
}
template <typename T>
__global__ void AddBiasTranspose(const T* input, const T* biases, T* output) {
// Format 0 for Separated Q, K, V (N*H <= 1024)
// Input: MxBxSxNxH
// Output: MxBxNxSxH
// B is batch_size, S is sequence_length, M is number of matrices, N is num_heads, H is head_size
int n = threadIdx.y;
int s = blockIdx.x;
int b = blockIdx.y;
int m = blockIdx.z; // matrix id
const int head_size = blockDim.x;
const int num_heads = blockDim.y;
const int sequence_length = gridDim.x;
const int batch_size = gridDim.y;
const int H = head_size;
const int NH = num_heads * head_size;
const int NHS = NH * sequence_length;
int in_offset = n * H + s * NH + (b + m * batch_size) * NHS;
const int out_offset = (s + n * sequence_length) * H + (b + m * batch_size) * NHS;
const int h = threadIdx.x;
if (h < head_size) {
output[out_offset + h] = input[in_offset + h] + biases[m * NH + n * H + h];
}
}
template <typename T>
__global__ void AddBiasTransposeLarge(const int head_size, const T* input, const T* biases, T* output) {
// Format 0 for Separated Q, K, V (N*H > 1024)
int n = threadIdx.y;
int s = blockIdx.x;
int b = blockIdx.y;
int m = blockIdx.z; // matrix id
const int stride = blockDim.x;
const int num_heads = blockDim.y;
const int sequence_length = gridDim.x;
const int batch_size = gridDim.y;
const int H = head_size;
const int NH = num_heads * H;
const int NHS = NH * sequence_length;
int in_offset = n * H + s * NH + (b + m * batch_size) * NHS;
const int out_offset = (s + n * sequence_length) * H + (b + m * batch_size) * NHS;
int h = threadIdx.x;
while (h < H) {
output[out_offset + h] = input[in_offset + h] + biases[m * NH + n * H + h];
h += stride;
}
}
template <typename T>
void InvokeAddBiasTranspose(
hipStream_t stream, const int num_matrices, const int format, const int max_threads_per_block,
const int batch_size, const int sequence_length, const int num_heads, const int qk_head_size,
const T* input, const T* biases, T* output, T* qkv_add_bias, const int v_head_size, int total_matrix_count,
bool do_rotary = false, int original_past_sequence_length = 0) {
assert(num_heads <= max_threads_per_block);
if (do_rotary) {
#ifdef USE_ROCM
ORT_THROW("Rotary Attention is not supported on ROCm");
#elif !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 530
if (format != 1 && format != 2 && format != 3) {
ORT_THROW("format must be 1, 2 or 3 for rotary attention");
}
if (v_head_size != -1 && qk_head_size != v_head_size) {
ORT_THROW("qk_head_size must be equal to v_head_size for rotary attention");
}
const int step = original_past_sequence_length == 0 ? sequence_length : original_past_sequence_length;
size_t smem_size = 2 * qk_head_size * sizeof(T);
const dim3 grid(sequence_length, num_heads, batch_size);
const dim3 block((qk_head_size / 2 + 31) / 32 * 32, 1, 1);
hipLaunchKernelGGL(( AddBiasTransposeQKV<T>), dim3(grid), dim3(block), smem_size, stream, total_matrix_count, input, biases, output,
qkv_add_bias, qk_head_size, qk_head_size,
step, format);
#else
ORT_THROW("Rotary Attention is supported on sm >= 530. Current sm is", __CUDA_ARCH__);
#endif
return;
}
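  // One block per (sequence position, batch, matrix). When num_heads * head_size fits in a block each thread
  // handles one head element; otherwise the *Large kernels stride over the head dimension.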
const dim3 grid(sequence_length, batch_size, num_matrices);
if (qk_head_size * num_heads <= max_threads_per_block) {
const dim3 block(qk_head_size, num_heads, 1);
if (format == 2) {
hipLaunchKernelGGL(( AddBiasTransposeTrt<T>), dim3(grid), dim3(block), 0, stream, input, biases, output);
} else if (format == 1) {
if (v_head_size == -1 || qk_head_size == v_head_size) {
hipLaunchKernelGGL(( AddBiasTransposeQKV<T>), dim3(grid), dim3(block), 0, stream, total_matrix_count, input, biases, output, qkv_add_bias);
} else {
ORT_ENFORCE(total_matrix_count == 3);
hipLaunchKernelGGL(( AddBiasTransposeQKV<T>), dim3(grid), dim3(block), 0, stream, input, biases, output, v_head_size);
}
} else if (format == 3) {
if (v_head_size == -1 || qk_head_size == v_head_size) {
hipLaunchKernelGGL(( AddBiasTransposeCutlass<T>), dim3(grid), dim3(block), 0, stream, total_matrix_count, input, biases, output);
} else {
ORT_ENFORCE(total_matrix_count == 3);
hipLaunchKernelGGL(( AddBiasTransposeCutlass<T>), dim3(grid), dim3(block), 0, stream, input, biases, output, v_head_size);
}
} else if (format == 4) { // format == 4
hipLaunchKernelGGL(( AddBiasUnpack<T>), dim3(grid), dim3(block), 0, stream, total_matrix_count, input, biases, output);
} else { // format == 0
hipLaunchKernelGGL(( AddBiasTranspose<T>), dim3(grid), dim3(block), 0, stream, input, biases, output);
}
} else {
const dim3 block(max_threads_per_block / num_heads, num_heads, 1);
if (format == 2) {
hipLaunchKernelGGL(( AddBiasTransposeTrtLarge<T>), dim3(grid), dim3(block), 0, stream, qk_head_size, input, biases, output);
} else if (format == 1) {
if (v_head_size == -1 || qk_head_size == v_head_size) {
hipLaunchKernelGGL(( AddBiasTransposeQKVLarge<T>), dim3(grid), dim3(block), 0, stream, qk_head_size, input, biases, output,
qkv_add_bias, total_matrix_count);
} else {
// It is rare for hidden size > 4096 (for half precision) and qk_head_size != v_head_size.
ORT_THROW("AddBiasTranspose (format 1) not implemented for hidden_size > max_threads_per_block when qk_head_size != v_head_size");
}
} else if (format == 3) {
if (v_head_size == -1 || qk_head_size == v_head_size) {
hipLaunchKernelGGL(( AddBiasTransposeCutlassLarge<T>), dim3(grid), dim3(block), 0, stream, qk_head_size, input, biases, output,
total_matrix_count);
} else {
ORT_THROW("AddBiasTranspose (format 3) not implemented for hidden_size > max_threads_per_block when qk_head_size != v_head_size");
}
} else if (format == 4) { // format == 4
ORT_THROW("AddBiasTranspose (format 4) not implemented for hidden_size > max_threads_per_block");
} else { // format 0
hipLaunchKernelGGL(( AddBiasTransposeLarge<T>), dim3(grid), dim3(block), 0, stream, qk_head_size, input, biases, output);
}
}
}
template <>
void LaunchAddBiasTranspose(
hipStream_t stream, const int num_matrices, const int format, const int max_threads_per_block,
const int batch_size, const int sequence_length, const int num_heads, const int qk_head_size,
const half* input, const half* biases, half* output, bool enable_half4, const int v_head_size,
half* qkv_add_bias, int total_matrix_count, bool do_rotary, int original_past_sequence_length) {
total_matrix_count = ::max(num_matrices, total_matrix_count);
if (enable_half4 && 0 == (qk_head_size % 4) && (v_head_size == -1 || 0 == (v_head_size % 4)) && !do_rotary) {
const int H = qk_head_size / 4;
const int H_v = v_head_size / 4;
const Half4* input2 = reinterpret_cast<const Half4*>(input);
const Half4* biases2 = reinterpret_cast<const Half4*>(biases);
Half4* output2 = reinterpret_cast<Half4*>(output);
Half4* qkv_add_bias2 = reinterpret_cast<Half4*>(qkv_add_bias);
InvokeAddBiasTranspose<Half4>(stream, num_matrices, format, max_threads_per_block,
batch_size, sequence_length, num_heads, H, input2, biases2, output2,
qkv_add_bias2, H_v, total_matrix_count);
} else if (0 == (qk_head_size & 1) && (v_head_size == -1 || 0 == (v_head_size & 1)) && !do_rotary) {
const int H = qk_head_size / 2;
const int H_v = v_head_size / 2;
const half2* input2 = reinterpret_cast<const half2*>(input);
const half2* biases2 = reinterpret_cast<const half2*>(biases);
half2* output2 = reinterpret_cast<half2*>(output);
half2* qkv_add_bias2 = reinterpret_cast<half2*>(qkv_add_bias);
InvokeAddBiasTranspose<half2>(stream, num_matrices, format, max_threads_per_block,
batch_size, sequence_length, num_heads, H, input2, biases2, output2,
qkv_add_bias2, H_v, total_matrix_count);
} else {
InvokeAddBiasTranspose<half>(
stream, num_matrices, format, max_threads_per_block,
batch_size, sequence_length, num_heads, qk_head_size, input, biases, output,
qkv_add_bias, v_head_size, total_matrix_count, do_rotary, original_past_sequence_length);
}
}
template <>
void LaunchAddBiasTranspose(
hipStream_t stream, const int num_matrices, const int format, const int max_threads_per_block,
const int batch_size, const int sequence_length, const int num_heads, const int qk_head_size,
const float* input, const float* biases, float* output, bool /*enable_half4*/,
const int v_head_size, float* qkv_add_bias, int total_matrix_count, bool do_rotary,
int original_past_sequence_length) {
total_matrix_count = ::max(num_matrices, total_matrix_count);
if (0 == (qk_head_size % 4) && (v_head_size == -1 || 0 == (v_head_size % 4)) && !do_rotary) {
const int H = qk_head_size / 4;
const float4* input2 = reinterpret_cast<const float4*>(input);
const float4* biases2 = reinterpret_cast<const float4*>(biases);
float4* output2 = reinterpret_cast<float4*>(output);
float4* qkv_add_bias2 = reinterpret_cast<float4*>(qkv_add_bias);
InvokeAddBiasTranspose<float4>(
stream, num_matrices, format, max_threads_per_block,
batch_size, sequence_length, num_heads, H, input2, biases2, output2,
qkv_add_bias2, v_head_size / 4, total_matrix_count);
} else if (0 == (qk_head_size & 1) && (v_head_size == -1 || 0 == (v_head_size & 1)) && !do_rotary) {
const int H = qk_head_size / 2;
const float2* input2 = reinterpret_cast<const float2*>(input);
const float2* biases2 = reinterpret_cast<const float2*>(biases);
float2* output2 = reinterpret_cast<float2*>(output);
float2* qkv_add_bias2 = reinterpret_cast<float2*>(qkv_add_bias);
InvokeAddBiasTranspose<float2>(
stream, num_matrices, format, max_threads_per_block,
batch_size, sequence_length, num_heads, H, input2, biases2, output2,
qkv_add_bias2, v_head_size / 2, total_matrix_count);
} else {
InvokeAddBiasTranspose<float>(
stream, num_matrices, format, max_threads_per_block,
batch_size, sequence_length, num_heads, qk_head_size, input, biases, output,
qkv_add_bias, v_head_size, total_matrix_count, do_rotary, original_past_sequence_length);
}
}
template <typename T>
void InvokeAddBiasTransposeTrt(
hipStream_t stream, const int max_threads_per_block,
const int batch_size, const int sequence_length, const int num_heads, const int head_size,
const T* biases, const T* query, const T* key, const T* value, T* output,
bool is_cross_attention, int kv_sequence_length) {
if (!is_cross_attention) {
ORT_ENFORCE(sequence_length == kv_sequence_length);
constexpr int num_matrices = 3;
const dim3 grid(sequence_length, batch_size, num_matrices);
if (head_size * num_heads <= max_threads_per_block) {
const dim3 block(head_size, num_heads, 1);
hipLaunchKernelGGL(( AddBiasTransposeTrt<T>), dim3(grid), dim3(block), 0, stream, query, key, value, biases, output);
} else {
const dim3 block(max_threads_per_block / num_heads, num_heads, 1);
hipLaunchKernelGGL(( AddBiasTransposeTrtLarge<T>), dim3(grid), dim3(block), 0, stream, head_size, query, key, value, biases, output);
}
} else { // cross attention
// Q: add bias
{
constexpr int num_matrices = 1;
const dim3 grid(sequence_length, batch_size, num_matrices);
if (head_size * num_heads <= max_threads_per_block) {
const dim3 block(head_size, num_heads, 1);
hipLaunchKernelGGL(( AddBiasTransposeTrt<T>), dim3(grid), dim3(block), 0, stream, query, biases, output);
} else {
const dim3 block(max_threads_per_block / num_heads, num_heads, 1);
hipLaunchKernelGGL(( AddBiasTransposeTrtLarge<T>), dim3(grid), dim3(block), 0, stream, head_size, query, biases, output);
}
}
// KV: add bias and pack kv
{
constexpr int num_matrices = 2;
const dim3 grid(kv_sequence_length, batch_size, num_matrices);
T* packed_kv = output + batch_size * sequence_length * num_heads * head_size;
if (head_size * num_heads <= max_threads_per_block) {
const dim3 block(head_size, num_heads, 1);
hipLaunchKernelGGL(( AddBiasTransposeTrtKV<T>), dim3(grid), dim3(block), 0, stream, key, value, biases, packed_kv);
} else {
const dim3 block(max_threads_per_block / num_heads, num_heads, 1);
hipLaunchKernelGGL(( AddBiasTransposeTrtKVLarge<T>), dim3(grid), dim3(block), 0, stream, head_size, key, value, biases, packed_kv);
}
}
}
}
template <>
void LaunchAddBiasTransposeTrt(
hipStream_t stream, const int max_threads_per_block,
const int batch_size, const int sequence_length,
const int num_heads, const int head_size,
const float* biases, const float* query, const float* key, const float* value, float* output,
bool is_cross_attention, int kv_sequence_length) {
ORT_ENFORCE(false, "Shall not call this since fused kernel does not support float input.");
}
template <>
void LaunchAddBiasTransposeTrt(
hipStream_t stream, const int max_threads_per_block,
const int batch_size, const int sequence_length,
const int num_heads, const int head_size,
const half* biases, const half* query, const half* key, const half* value, half* output,
bool is_cross_attention, int kv_sequence_length) {
if (0 == (head_size % 4)) {
const int H = head_size / 4;
const Half4* query2 = reinterpret_cast<const Half4*>(query);
const Half4* key2 = reinterpret_cast<const Half4*>(key);
const Half4* value2 = reinterpret_cast<const Half4*>(value);
const Half4* biases2 = reinterpret_cast<const Half4*>(biases);
Half4* output2 = reinterpret_cast<Half4*>(output);
InvokeAddBiasTransposeTrt<Half4>(stream, max_threads_per_block,
batch_size, sequence_length, num_heads, H,
biases2, query2, key2, value2, output2, is_cross_attention, kv_sequence_length);
} else if (0 == (head_size & 1)) {
const int H = head_size / 2;
const half2* query2 = reinterpret_cast<const half2*>(query);
const half2* key2 = reinterpret_cast<const half2*>(key);
const half2* value2 = reinterpret_cast<const half2*>(value);
const half2* biases2 = reinterpret_cast<const half2*>(biases);
half2* output2 = reinterpret_cast<half2*>(output);
InvokeAddBiasTransposeTrt<half2>(stream, max_threads_per_block,
batch_size, sequence_length, num_heads, H,
biases2, query2, key2, value2, output2, is_cross_attention, kv_sequence_length);
} else {
InvokeAddBiasTransposeTrt<half>(stream, max_threads_per_block,
batch_size, sequence_length, num_heads, head_size,
biases, query, key, value, output, is_cross_attention, kv_sequence_length);
}
}
template <typename T>
void InvokeAddBias(
hipStream_t stream, const int max_threads_per_block,
const int batch_size, const int sequence_length, const int kv_sequence_length,
const int num_heads, const int head_size, const int v_head_size,
const T* biases, const T* query, const T* key, const T* value, T* q, T* k, T* v) {
assert(num_heads <= max_threads_per_block);
constexpr int num_matrices = 1;
// Q
{
const dim3 grid(sequence_length, batch_size, num_matrices);
if (head_size * num_heads <= max_threads_per_block) {
const dim3 block(head_size, num_heads, 1);
hipLaunchKernelGGL(( AddBiasTransposeTrt<T>), dim3(grid), dim3(block), 0, stream, query, biases, q);
} else {
const dim3 block(max_threads_per_block / num_heads, num_heads, 1);
hipLaunchKernelGGL(( AddBiasTransposeTrtLarge<T>), dim3(grid), dim3(block), 0, stream, head_size, query, biases, q);
}
}
// K
{
const dim3 grid(kv_sequence_length, batch_size, num_matrices);
const T* biases_k = biases + num_heads * head_size;
if (head_size * num_heads <= max_threads_per_block) {
const dim3 block(head_size, num_heads, 1);
hipLaunchKernelGGL(( AddBiasTransposeTrt<T>), dim3(grid), dim3(block), 0, stream, key, biases_k, k);
} else {
const dim3 block(max_threads_per_block / num_heads, num_heads, 1);
hipLaunchKernelGGL(( AddBiasTransposeTrtLarge<T>), dim3(grid), dim3(block), 0, stream, head_size, key, biases_k, k);
}
}
// V
{
const dim3 grid(kv_sequence_length, batch_size, num_matrices);
const T* biases_v = biases + 2 * num_heads * head_size;
if (v_head_size * num_heads <= max_threads_per_block) {
const dim3 block(v_head_size, num_heads, 1);
hipLaunchKernelGGL(( AddBiasTransposeTrt<T>), dim3(grid), dim3(block), 0, stream, value, biases_v, v);
} else {
const dim3 block(max_threads_per_block / num_heads, num_heads, 1);
hipLaunchKernelGGL(( AddBiasTransposeTrtLarge<T>), dim3(grid), dim3(block), 0, stream, v_head_size, value, biases_v, v);
}
}
}
template <>
void LaunchAddBias(
hipStream_t stream, const int max_threads_per_block,
const int batch_size, const int sequence_length, const int kv_sequence_length,
const int num_heads, const int head_size, const int v_head_size,
const float* biases, const float* query, const float* key, const float* value, float* q, float* k, float* v) {
if (0 == (head_size % 4) && 0 == (v_head_size % 4)) {
const int H = head_size / 4;
const int H_v = v_head_size / 4;
const float4* query2 = reinterpret_cast<const float4*>(query);
const float4* key2 = reinterpret_cast<const float4*>(key);
const float4* value2 = reinterpret_cast<const float4*>(value);
const float4* biases2 = reinterpret_cast<const float4*>(biases);
float4* q2 = reinterpret_cast<float4*>(q);
float4* k2 = reinterpret_cast<float4*>(k);
float4* v2 = reinterpret_cast<float4*>(v);
InvokeAddBias<float4>(stream, max_threads_per_block,
batch_size, sequence_length, kv_sequence_length, num_heads, H, H_v,
biases2, query2, key2, value2, q2, k2, v2);
} else if (0 == (head_size & 1) && 0 == (v_head_size & 1)) {
const int H = head_size / 2;
const int H_v = v_head_size / 2;
const float2* query2 = reinterpret_cast<const float2*>(query);
const float2* key2 = reinterpret_cast<const float2*>(key);
const float2* value2 = reinterpret_cast<const float2*>(value);
const float2* biases2 = reinterpret_cast<const float2*>(biases);
float2* q2 = reinterpret_cast<float2*>(q);
float2* k2 = reinterpret_cast<float2*>(k);
float2* v2 = reinterpret_cast<float2*>(v);
InvokeAddBias<float2>(stream, max_threads_per_block,
batch_size, sequence_length, kv_sequence_length, num_heads, H, H_v,
biases2, query2, key2, value2, q2, k2, v2);
} else {
InvokeAddBias<float>(stream, max_threads_per_block,
batch_size, sequence_length, kv_sequence_length, num_heads, head_size, v_head_size,
biases, query, key, value, q, k, v);
}
}
template <>
void LaunchAddBias(
hipStream_t stream, const int max_threads_per_block,
const int batch_size, const int sequence_length, const int kv_sequence_length,
const int num_heads, const int head_size, const int v_head_size,
const half* biases, const half* query, const half* key, const half* value, half* q, half* k, half* v) {
if (0 == (head_size % 4) && 0 == (v_head_size % 4)) {
const int H = head_size / 4;
const int H_v = v_head_size / 4;
const Half4* query2 = reinterpret_cast<const Half4*>(query);
const Half4* key2 = reinterpret_cast<const Half4*>(key);
const Half4* value2 = reinterpret_cast<const Half4*>(value);
const Half4* biases2 = reinterpret_cast<const Half4*>(biases);
Half4* q2 = reinterpret_cast<Half4*>(q);
Half4* k2 = reinterpret_cast<Half4*>(k);
Half4* v2 = reinterpret_cast<Half4*>(v);
InvokeAddBias<Half4>(stream, max_threads_per_block,
batch_size, sequence_length, kv_sequence_length, num_heads, H, H_v,
biases2, query2, key2, value2, q2, k2, v2);
} else if (0 == (head_size & 1) && 0 == (v_head_size & 1)) {
const int H = head_size / 2;
const int H_v = v_head_size / 2;
const half2* query2 = reinterpret_cast<const half2*>(query);
const half2* key2 = reinterpret_cast<const half2*>(key);
const half2* value2 = reinterpret_cast<const half2*>(value);
const half2* biases2 = reinterpret_cast<const half2*>(biases);
half2* q2 = reinterpret_cast<half2*>(q);
half2* k2 = reinterpret_cast<half2*>(k);
half2* v2 = reinterpret_cast<half2*>(v);
InvokeAddBias<half2>(stream, max_threads_per_block,
batch_size, sequence_length, kv_sequence_length, num_heads, H, H_v,
biases2, query2, key2, value2, q2, k2, v2);
} else {
InvokeAddBias<half>(stream, max_threads_per_block,
batch_size, sequence_length, kv_sequence_length, num_heads, head_size, v_head_size,
biases, query, key, value, q, k, v);
}
}
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
| 3bd3d7ec7fdb1f1bdee476d17c831c6a8dc8ce37.cu | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "core/providers/cuda/cuda_common.h"
#include "core/providers/cuda/cu_inc/common.cuh"
#include "contrib_ops/cuda/bert/add_bias_transpose.h"
#include "contrib_ops/cuda/bert/rotary_embedding_util.h"
using namespace onnxruntime::cuda;
namespace onnxruntime {
namespace contrib {
namespace cuda {
template <typename T>
__global__ void AddBiasTransposeTrt(const T* input, const T* biases, T* output) {
// Format 2 for TensorRT fused attention (N*H <= 1024)
// Input: BxSxMxNxH
// Output: BxSxNxMxH
// B is batch_size, S is sequence_length, M is number of matrices, N is num_heads, H is head_size
// This kernel could support hidden size up to 4 * 1024 when T is Half4 and input is half.
int n = threadIdx.y;
int s = blockIdx.x;
int b = blockIdx.y;
int m = blockIdx.z; // matrix id
const int H = blockDim.x;
const int N = blockDim.y;
const int S = gridDim.x;
const int M = gridDim.z;
const int NH = N * H;
const int offset = (b * S + s) * M * NH;
const int in_offset = offset + m * NH + n * H;
const int out_offset = offset + (n * M + m) * H;
const int h = threadIdx.x;
if (h < H) {
output[out_offset + h] = input[in_offset + h] + biases[m * NH + n * H + h];
}
}
template <typename T>
__global__ void AddBiasTransposeTrtLarge(const int head_size, const T* input, const T* biases, T* output) {
// Format 2 for TensorRT fused attention (N*H > 1024)
int n = threadIdx.y;
int s = blockIdx.x;
int b = blockIdx.y;
int m = blockIdx.z;
const int stride = blockDim.x;
const int H = head_size;
const int N = blockDim.y;
const int S = gridDim.x;
const int M = gridDim.z;
const int NH = N * H;
const int offset = (b * S + s) * M * NH;
const int in_offset = offset + m * NH + n * H;
const int out_offset = offset + (n * M + m) * H;
int h = threadIdx.x;
while (h < H) {
output[out_offset + h] = input[in_offset + h] + biases[m * NH + n * H + h];
h += stride;
}
}
template <typename T>
__global__ void AddBiasTransposeTrt(const T* query, const T* key, const T* value, const T* biases, T* output) {
// Separated Q/K/V inputs for TensorRT fused attention (N*H <= 1024)
// Q: BxSxNxH
// K: BxSxNxH
// V: BxSxNxH
// Output: BxSxNxMxH
// B is batch_size, S is sequence_length, M is number of matrices (3), N is num_heads, H is head_size
int n = threadIdx.y;
int s = blockIdx.x;
int b = blockIdx.y;
int m = blockIdx.z; // matrix id
const int H = blockDim.x;
const int N = blockDim.y;
const int S = gridDim.x;
const int M = gridDim.z;
const T* input = (m == 0 ? query : (m == 1 ? key : value));
const int NH = N * H;
const int in_offset = (b * S + s) * NH + n * H;
const int out_offset = (b * S + s) * M * NH + (n * M + m) * H;
const int h = threadIdx.x;
if (h < H) {
output[out_offset + h] = input[in_offset + h] + biases[m * NH + n * H + h];
}
}
template <typename T>
__global__ void AddBiasTransposeTrtLarge(const int head_size,
const T* query, const T* key, const T* value, const T* biases, T* output) {
// Separated Q/K/V inputs for TensorRT fused attention (N*H > 1024)
int n = threadIdx.y;
int s = blockIdx.x;
int b = blockIdx.y;
int m = blockIdx.z; // matrix id
const int stride = blockDim.x;
const int H = head_size;
const int N = blockDim.y;
const int S = gridDim.x;
const int M = gridDim.z;
const T* input = (m == 0 ? query : (m == 1 ? key : value));
const int NH = N * H;
const int in_offset = (b * S + s) * NH + n * H;
const int out_offset = (b * S + s) * M * NH + (n * M + m) * H;
int h = threadIdx.x;
  while (h < H) {  // loop so that head_size > blockDim.x is fully covered, like the other *Large kernels
output[out_offset + h] = input[in_offset + h] + biases[m * NH + n * H + h];
h += stride;
}
}
template <typename T>
__global__ void AddBiasTransposeTrtKV(const T* key, const T* value, const T* biases, T* output) {
// Separated K/V inputs for TensorRT fused cross attention (N*H <= 1024)
// K: BxSxNxH
// V: BxSxNxH
// Output: BxSxNxMxH (packed KV, requires H = H_v)
// B is batch_size, S is sequence_length, M is number of matrices (2), N is num_heads, H is head_size
int n = threadIdx.y;
int s = blockIdx.x;
int b = blockIdx.y;
int m = blockIdx.z; // matrix id
const int H = blockDim.x;
const int N = blockDim.y;
const int S = gridDim.x;
const int NH = N * H;
const int in_offset = (b * S + s) * NH + n * H;
const T* input = (m == 0 ? key : value);
constexpr int M = 2;
const int out_offset = (b * S + s) * M * NH + (n * M + m) * H;
const int h = threadIdx.x;
if (h < H) {
output[out_offset + h] = input[in_offset + h] + biases[(m + 1) * NH + n * H + h];
}
}
template <typename T>
__global__ void AddBiasTransposeTrtKVLarge(const int head_size,
const T* key, const T* value, const T* biases,
T* output) {
// Separated K/V inputs for TensorRT fused cross attention (N*H > 1024)
int n = threadIdx.y;
int s = blockIdx.x;
int b = blockIdx.y;
int m = blockIdx.z; // matrix id
const int stride = blockDim.x;
const int H = head_size;
const int N = blockDim.y;
const int S = gridDim.x;
const int NH = N * H;
const int in_offset = (b * S + s) * NH + n * H;
const T* input = (m == 0 ? key : value);
constexpr int M = 2;
const int out_offset = (b * S + s) * M * NH + (n * M + m) * H;
int h = threadIdx.x;
while (h < H) {
output[out_offset + h] = input[in_offset + h] + biases[(m + 1) * NH + n * H + h];
h += stride;
}
}
template <typename T>
__global__ void AddBiasTransposeQKV(int M, const T* input, const T* biases, T* output, T* qkv_add_bias) {
// Format 1 for unfused attention, or fused causal attention
// Input: BxSxMxNxH
// Output: MxBxNxSxH
// qkv_add_bias: BxSxMxNxH
// B is batch_size, S is sequence_length, M is number of matrices, N is num_heads, H is head_size
int n = threadIdx.y;
int s = blockIdx.x;
int b = blockIdx.y;
int m = blockIdx.z; // matrix id
const int head_size = blockDim.x;
const int num_heads = blockDim.y;
const int sequence_length = gridDim.x;
const int batch_size = gridDim.y;
const int H = head_size;
const int NH = num_heads * head_size;
const int NHS = NH * sequence_length;
int in_offset = n * head_size + (m + s * M) * NH + b * NHS * M;
const int out_offset = s * head_size + n * sequence_length * H + b * NHS + m * NHS * batch_size;
const int h = threadIdx.x;
if (h < head_size) {
output[out_offset + h] = input[in_offset + h] + biases[m * NH + n * H + h];
if (nullptr != qkv_add_bias) {
qkv_add_bias[in_offset + h] = input[in_offset + h] + biases[m * NH + n * H + h];
}
}
}
#ifndef USE_ROCM
template <typename T>
__global__ void AddBiasTransposeQKV(int M, const T* input, const T* biases, T* output, T* qkv_add_bias,
const int rotary_embedding_dim, const int head_size, const int step,
const int format) {
// AddBiasTransposeQKV with rotary embedding
// Format 1 for unfused attention, or fused causal attention
// Input: BxSxMxNxH
// Output: MxBxNxSxH
// qkv_add_bias: BxSxMxNxH
// Format 2 for fused TRT attention
// Input: BxSxMxNxH
// Output: BxSxNxMxH
// qkv_add_bias: BxSxMxNxH
// Format 3 for cutlass memory efficient attention
// Input: BxSxMxNxH
// Output: MxBxSxNxH
// B is batch_size, S is sequence_length, M is number of matrices, N is num_heads, H is head_size
int n = blockIdx.y;
int s = blockIdx.x;
int b = blockIdx.z;
const int seq_len = (gridDim.x == step) ? s : step;
const int num_heads = gridDim.y;
const int sequence_length = gridDim.x;
const int batch_size = gridDim.z;
const int H = head_size;
const int NH = num_heads * head_size;
const int NHS = NH * sequence_length;
constexpr int vec_size = Vec_t<T>::size;
using Vec_t = typename Vec_t<T>::Type;
extern __shared__ __align__(sizeof(float2)) char smem_[];
int tidx = threadIdx.x;
const int head_idx = tidx * vec_size;
if (head_idx < head_size) {
const bool is_masked = head_idx >= head_size;
const int input_offset_base = n * head_size + (s * M) * NH + b * NHS * M;
const int src_q_idx = input_offset_base + head_idx;
const int src_k_idx = input_offset_base + NH + head_idx;
const int src_v_idx = input_offset_base + 2 * NH + head_idx;
Vec_t q, k, v;
Vec_t q_bias, k_bias, v_bias;
if (!is_masked) {
q = *reinterpret_cast<const Vec_t*>(&input[src_q_idx]);
k = *reinterpret_cast<const Vec_t*>(&input[src_k_idx]);
v = *reinterpret_cast<const Vec_t*>(&input[src_v_idx]);
q_bias = *reinterpret_cast<const Vec_t*>(&biases[n * H + head_idx]);
k_bias = *reinterpret_cast<const Vec_t*>(&biases[NH + n * H + head_idx]);
v_bias = *reinterpret_cast<const Vec_t*>(&biases[2 * NH + n * H + head_idx]);
}
q = add_vec(q, q_bias);
k = add_vec(k, k_bias);
v = add_vec(v, v_bias);
const bool do_rotary = !is_masked && vec_size * tidx < rotary_embedding_dim;
T* q_smem = reinterpret_cast<T*>(smem_);
T* k_smem = q_smem + rotary_embedding_dim;
const int half_rotary_dim = rotary_embedding_dim / 2;
const int half_idx = (head_idx) / half_rotary_dim;
const int intra_half_idx = (head_idx) % half_rotary_dim;
const int smem_pitch = half_rotary_dim;
if (do_rotary) {
*reinterpret_cast<Vec_t*>(q_smem + half_idx * smem_pitch + intra_half_idx) = q;
*reinterpret_cast<Vec_t*>(k_smem + half_idx * smem_pitch + intra_half_idx) = k;
}
__syncthreads();
const int transpose_idx = half_idx * (half_rotary_dim / 2) + intra_half_idx / 2;
constexpr int tidx_factor = vec_size / 2;
if (do_rotary) {
vec_from_smem_transpose(q, q_smem, transpose_idx, smem_pitch);
vec_from_smem_transpose(k, k_smem, transpose_idx, smem_pitch);
apply_rotary_embedding(q, k, transpose_idx / tidx_factor, rotary_embedding_dim, seq_len);
write_smem_transpose(q, q_smem, transpose_idx, smem_pitch);
write_smem_transpose(k, k_smem, transpose_idx, smem_pitch);
}
__syncthreads();
if (do_rotary) {
q = *reinterpret_cast<Vec_t*>(q_smem + half_idx * smem_pitch + intra_half_idx);
k = *reinterpret_cast<Vec_t*>(k_smem + half_idx * smem_pitch + intra_half_idx);
}
int dest_q_idx;
int dest_k_idx;
int dest_v_idx;
// Format 1
if (format == 1) {
const int output_offset_base = s * head_size + n * sequence_length * H + b * NHS;
dest_q_idx = output_offset_base + head_idx;
dest_k_idx = output_offset_base + NHS * batch_size + head_idx;
dest_v_idx = output_offset_base + 2 * NHS * batch_size + head_idx;
}
// Format 2
if (format == 2) {
const int output_offset_base = M * (b * NHS + s * NH + n * H);
dest_q_idx = output_offset_base + head_idx;
dest_k_idx = output_offset_base + H + head_idx;
dest_v_idx = output_offset_base + 2 * H + head_idx;
}
// Format 3
if (format == 3) {
const int output_offset_base = n * H + s * NH + b * NHS;
dest_q_idx = output_offset_base + head_idx;
dest_k_idx = output_offset_base + NHS * batch_size + head_idx;
dest_v_idx = output_offset_base + 2 * NHS * batch_size + head_idx;
}
if (!is_masked) {
*reinterpret_cast<Vec_t*>(&output[dest_q_idx]) = q;
*reinterpret_cast<Vec_t*>(&output[dest_k_idx]) = k;
*reinterpret_cast<Vec_t*>(&output[dest_v_idx]) = v;
if (nullptr != qkv_add_bias) {
*reinterpret_cast<Vec_t*>(&qkv_add_bias[src_q_idx]) = q;
*reinterpret_cast<Vec_t*>(&qkv_add_bias[src_k_idx]) = k;
*reinterpret_cast<Vec_t*>(&qkv_add_bias[src_v_idx]) = v;
}
}
}
}
#endif
// This assumes 3 matrices in total.
template <typename T>
__global__ void AddBiasTransposeQKV(const T* input, const T* biases, T* output, int v_head_size) {
// Format 1 for unfused attention
  // Input: BxSx(NxH + NxH + NxH_v) (Packed QKV where K and V have different hidden sizes)
// Output: BxNxSxH + BxNxSxH + BxNxSxH_v
// B is batch_size, S is sequence_length, N is num_heads, H is qk_head_size, H_v is v_head_size
int n = threadIdx.y; // head_num_id
int s = blockIdx.x; // sequence_id
int b = blockIdx.y; // batch_id
int m = blockIdx.z; // matrix id (Q=0, K=1, V=2)
const int h = threadIdx.x; // head_element_id
const int qk_head_size = blockDim.x;
const int num_heads = blockDim.y;
const int sequence_length = gridDim.x;
const int batch_size = gridDim.y;
const int head_size = (m == 2 ? v_head_size : qk_head_size);
const int total_head_size = num_heads * (qk_head_size + qk_head_size + v_head_size);
int in_offset;
int out_offset;
int bias_offset;
in_offset = b * (total_head_size * sequence_length) + // B
s * (total_head_size) + // S
m * (qk_head_size * num_heads) + // M
n * head_size + // N
h; // H
out_offset = m * (num_heads * qk_head_size * sequence_length * batch_size) + // M
b * (num_heads * head_size * sequence_length) + // B
n * (sequence_length * head_size) + // N
s * (head_size) + // S
h; // H
bias_offset = m * (num_heads * qk_head_size) + // M
n * (head_size) + // N
h; // H
if (h < head_size) {
output[out_offset] = input[in_offset] + biases[bias_offset];
}
}
template <typename T>
__global__ void AddBiasTransposeQKVLarge(const int head_size, const T* input, const T* biases, T* output,
T* qkv_add_bias, const int M) {
// Format 1 for unfused attention (N*H > 1024), or fused causal attention
// Input: BxSxMxNxH (Packed QKV)
// Output: MxBxNxSxH
// qkv_add_bias: BxSxMxNxH
int n = threadIdx.y;
int s = blockIdx.x;
int b = blockIdx.y;
int m = blockIdx.z; // matrix id
const int stride = blockDim.x;
const int num_heads = blockDim.y;
const int sequence_length = gridDim.x;
const int batch_size = gridDim.y;
const int H = head_size;
const int NH = num_heads * H;
const int NHS = NH * sequence_length;
int in_offset = n * H + (m + s * M) * NH + b * NHS * M;
const int out_offset = s * H + n * sequence_length * H + b * NHS + m * NHS * batch_size;
int h = threadIdx.x;
while (h < H) {
output[out_offset + h] = input[in_offset + h] + biases[m * NH + n * H + h];
if (nullptr != qkv_add_bias) {
qkv_add_bias[in_offset + h] = input[in_offset + h] + biases[m * NH + n * H + h];
}
h += stride;
}
}
template <typename T>
__global__ void AddBiasTransposeCutlass(const T* input, const T* biases, T* output, int v_head_size) {
// Format 3 for cutlass memory efficient attention
  // Input: BxSx(NxH + NxH + NxH_v) (Packed QKV where K and V have different hidden sizes)
// Output: BxNxSxH + BxNxSxH + BxNxSxH_v
// B is batch_size, S is sequence_length, N is num_heads, H is qk_head_size, H_v is v_head_size
int n = threadIdx.y; // head_num_id
int s = blockIdx.x; // sequence_id
int b = blockIdx.y; // batch_id
int m = blockIdx.z; // matrix id (Q=0, K=1, V=2)
const int h = threadIdx.x; // head_element_id
const int qk_head_size = blockDim.x;
const int num_heads = blockDim.y;
const int sequence_length = gridDim.x;
const int batch_size = gridDim.y;
const int head_size = (m == 2 ? v_head_size : qk_head_size);
const int total_head_size = num_heads * (qk_head_size + qk_head_size + v_head_size);
int in_offset;
int out_offset;
int bias_offset;
in_offset = b * (total_head_size * sequence_length) + // B
s * (total_head_size) + // S
m * (qk_head_size * num_heads) + // M
n * head_size + // N
h; // H
out_offset = m * (num_heads * qk_head_size * sequence_length * batch_size) + // M
b * (num_heads * head_size * sequence_length) + // B
s * (num_heads * head_size) + // S
n * (head_size) + // N
h; // H
bias_offset = m * (num_heads * qk_head_size) + // M
n * (head_size) + // N
h; // H
if (h < head_size) {
output[out_offset] = input[in_offset] + biases[bias_offset];
}
}
template <typename T>
__global__ void AddBiasUnpack(int M, const T* input, const T* biases, T* output) {
// Format 4 to unpack TRT packed input format for memory efficient attention.
// Input: BxSxNxMxH
// Output: MxBxSxNxH
// B is batch_size, S is sequence_length, M is number of matrices, N is num_heads, H is head_size
int n = threadIdx.y;
int s = blockIdx.x;
int b = blockIdx.y;
int m = blockIdx.z; // matrix id
const int head_size = blockDim.x;
const int num_heads = blockDim.y;
const int sequence_length = gridDim.x;
const int batch_size = gridDim.y;
const int H = head_size;
const int NH = num_heads * head_size;
const int NHS = NH * sequence_length;
int in_offset = m * head_size + n * M * H + (s * NH + b * NHS) * M;
const int out_offset = n * head_size + s * NH + b * NHS + m * NHS * batch_size;
const int h = threadIdx.x;
if (h < head_size) {
if (biases != nullptr) {
output[out_offset + h] = input[in_offset + h] + biases[m * NH + n * H + h];
} else {
output[out_offset + h] = input[in_offset + h];
}
}
}
template <typename T>
__global__ void AddBiasTransposeCutlass(int M, const T* input, const T* biases, T* output) {
// Format 3 for cutlass memory efficient attention
// Input: BxSxMxNxH
// Output: MxBxSxNxH
// B is batch_size, S is sequence_length, M is number of matrices, N is num_heads, H is head_size
int n = threadIdx.y;
int s = blockIdx.x;
int b = blockIdx.y;
int m = blockIdx.z; // matrix id
const int head_size = blockDim.x;
const int num_heads = blockDim.y;
const int sequence_length = gridDim.x;
const int batch_size = gridDim.y;
const int H = head_size;
const int NH = num_heads * head_size;
const int NHS = NH * sequence_length;
int in_offset = n * head_size + (m + s * M) * NH + b * NHS * M;
const int out_offset = n * head_size + s * NH + b * NHS + m * NHS * batch_size;
const int h = threadIdx.x;
if (h < head_size) {
output[out_offset + h] = input[in_offset + h] + biases[m * NH + n * H + h];
}
}
template <typename T>
__global__ void AddBiasTransposeCutlassLarge(const int head_size, const T* input, const T* biases, T* output,
const int M) {
// Format 3 for cutlass memory efficient attention
// Input: BxSxMxNxH (Packed QKV)
// Output: MxBxSxNxH
int n = threadIdx.y;
int s = blockIdx.x;
int b = blockIdx.y;
int m = blockIdx.z; // matrix id
const int stride = blockDim.x;
const int num_heads = blockDim.y;
const int sequence_length = gridDim.x;
const int batch_size = gridDim.y;
const int H = head_size;
const int NH = num_heads * H;
const int NHS = NH * sequence_length;
int in_offset = n * H + (m + s * M) * NH + b * NHS * M;
const int out_offset = n * H + s * NH + b * NHS + m * NHS * batch_size;
int h = threadIdx.x;
while (h < H) {
output[out_offset + h] = input[in_offset + h] + biases[m * NH + n * H + h];
h += stride;
}
}
template <typename T>
__global__ void AddBiasTranspose(const T* input, const T* biases, T* output) {
// Format 0 for Separated Q, K, V (N*H <= 1024)
// Input: MxBxSxNxH
// Output: MxBxNxSxH
// B is batch_size, S is sequence_length, M is number of matrices, N is num_heads, H is head_size
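  // Each thread copies one element: in_offset walks the input as [m][b][s][n][h] while
  // out_offset walks the output as [m][b][n][s][h], so only the S and N strides are swapped.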
int n = threadIdx.y;
int s = blockIdx.x;
int b = blockIdx.y;
int m = blockIdx.z; // matrix id
const int head_size = blockDim.x;
const int num_heads = blockDim.y;
const int sequence_length = gridDim.x;
const int batch_size = gridDim.y;
const int H = head_size;
const int NH = num_heads * head_size;
const int NHS = NH * sequence_length;
int in_offset = n * H + s * NH + (b + m * batch_size) * NHS;
const int out_offset = (s + n * sequence_length) * H + (b + m * batch_size) * NHS;
const int h = threadIdx.x;
if (h < head_size) {
output[out_offset + h] = input[in_offset + h] + biases[m * NH + n * H + h];
}
}
template <typename T>
__global__ void AddBiasTransposeLarge(const int head_size, const T* input, const T* biases, T* output) {
// Format 0 for Separated Q, K, V (N*H > 1024)
int n = threadIdx.y;
int s = blockIdx.x;
int b = blockIdx.y;
int m = blockIdx.z; // matrix id
const int stride = blockDim.x;
const int num_heads = blockDim.y;
const int sequence_length = gridDim.x;
const int batch_size = gridDim.y;
const int H = head_size;
const int NH = num_heads * H;
const int NHS = NH * sequence_length;
int in_offset = n * H + s * NH + (b + m * batch_size) * NHS;
const int out_offset = (s + n * sequence_length) * H + (b + m * batch_size) * NHS;
int h = threadIdx.x;
while (h < H) {
output[out_offset + h] = input[in_offset + h] + biases[m * NH + n * H + h];
h += stride;
}
}
template <typename T>
void InvokeAddBiasTranspose(
cudaStream_t stream, const int num_matrices, const int format, const int max_threads_per_block,
const int batch_size, const int sequence_length, const int num_heads, const int qk_head_size,
const T* input, const T* biases, T* output, T* qkv_add_bias, const int v_head_size, int total_matrix_count,
bool do_rotary = false, int original_past_sequence_length = 0) {
assert(num_heads <= max_threads_per_block);
if (do_rotary) {
#ifdef USE_ROCM
ORT_THROW("Rotary Attention is not supported on ROCm");
#elif !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 530
if (format != 1 && format != 2 && format != 3) {
ORT_THROW("format must be 1, 2 or 3 for rotary attention");
}
if (v_head_size != -1 && qk_head_size != v_head_size) {
ORT_THROW("qk_head_size must be equal to v_head_size for rotary attention");
}
const int step = original_past_sequence_length == 0 ? sequence_length : original_past_sequence_length;
size_t smem_size = 2 * qk_head_size * sizeof(T);
const dim3 grid(sequence_length, num_heads, batch_size);
const dim3 block((qk_head_size / 2 + 31) / 32 * 32, 1, 1);
AddBiasTransposeQKV<T><<<grid, block, smem_size, stream>>>(total_matrix_count, input, biases, output,
qkv_add_bias, qk_head_size, qk_head_size,
step, format);
#else
ORT_THROW("Rotary Attention is supported on sm >= 530. Current sm is", __CUDA_ARCH__);
#endif
return;
}
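  // One block per (sequence position, batch, matrix id). When num_heads * head_size fits in a
  // single block, each thread handles one head element; otherwise the *Large kernels stride over H.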
const dim3 grid(sequence_length, batch_size, num_matrices);
if (qk_head_size * num_heads <= max_threads_per_block) {
const dim3 block(qk_head_size, num_heads, 1);
if (format == 2) {
AddBiasTransposeTrt<T><<<grid, block, 0, stream>>>(input, biases, output);
} else if (format == 1) {
if (v_head_size == -1 || qk_head_size == v_head_size) {
AddBiasTransposeQKV<T><<<grid, block, 0, stream>>>(total_matrix_count, input, biases, output, qkv_add_bias);
} else {
ORT_ENFORCE(total_matrix_count == 3);
AddBiasTransposeQKV<T><<<grid, block, 0, stream>>>(input, biases, output, v_head_size);
}
} else if (format == 3) {
if (v_head_size == -1 || qk_head_size == v_head_size) {
AddBiasTransposeCutlass<T><<<grid, block, 0, stream>>>(total_matrix_count, input, biases, output);
} else {
ORT_ENFORCE(total_matrix_count == 3);
AddBiasTransposeCutlass<T><<<grid, block, 0, stream>>>(input, biases, output, v_head_size);
}
} else if (format == 4) { // format == 4
AddBiasUnpack<T><<<grid, block, 0, stream>>>(total_matrix_count, input, biases, output);
} else { // format == 0
AddBiasTranspose<T><<<grid, block, 0, stream>>>(input, biases, output);
}
} else {
const dim3 block(max_threads_per_block / num_heads, num_heads, 1);
if (format == 2) {
AddBiasTransposeTrtLarge<T><<<grid, block, 0, stream>>>(qk_head_size, input, biases, output);
} else if (format == 1) {
if (v_head_size == -1 || qk_head_size == v_head_size) {
AddBiasTransposeQKVLarge<T><<<grid, block, 0, stream>>>(qk_head_size, input, biases, output,
qkv_add_bias, total_matrix_count);
} else {
// It is rare for hidden size > 4096 (for half precision) and qk_head_size != v_head_size.
ORT_THROW("AddBiasTranspose (format 1) not implemented for hidden_size > max_threads_per_block when qk_head_size != v_head_size");
}
} else if (format == 3) {
if (v_head_size == -1 || qk_head_size == v_head_size) {
AddBiasTransposeCutlassLarge<T><<<grid, block, 0, stream>>>(qk_head_size, input, biases, output,
total_matrix_count);
} else {
ORT_THROW("AddBiasTranspose (format 3) not implemented for hidden_size > max_threads_per_block when qk_head_size != v_head_size");
}
} else if (format == 4) { // format == 4
ORT_THROW("AddBiasTranspose (format 4) not implemented for hidden_size > max_threads_per_block");
} else { // format 0
AddBiasTransposeLarge<T><<<grid, block, 0, stream>>>(qk_head_size, input, biases, output);
}
}
}
template <>
void LaunchAddBiasTranspose(
cudaStream_t stream, const int num_matrices, const int format, const int max_threads_per_block,
const int batch_size, const int sequence_length, const int num_heads, const int qk_head_size,
const half* input, const half* biases, half* output, bool enable_half4, const int v_head_size,
half* qkv_add_bias, int total_matrix_count, bool do_rotary, int original_past_sequence_length) {
total_matrix_count = std::max(num_matrices, total_matrix_count);
if (enable_half4 && 0 == (qk_head_size % 4) && (v_head_size == -1 || 0 == (v_head_size % 4)) && !do_rotary) {
const int H = qk_head_size / 4;
const int H_v = v_head_size / 4;
const Half4* input2 = reinterpret_cast<const Half4*>(input);
const Half4* biases2 = reinterpret_cast<const Half4*>(biases);
Half4* output2 = reinterpret_cast<Half4*>(output);
Half4* qkv_add_bias2 = reinterpret_cast<Half4*>(qkv_add_bias);
InvokeAddBiasTranspose<Half4>(stream, num_matrices, format, max_threads_per_block,
batch_size, sequence_length, num_heads, H, input2, biases2, output2,
qkv_add_bias2, H_v, total_matrix_count);
} else if (0 == (qk_head_size & 1) && (v_head_size == -1 || 0 == (v_head_size & 1)) && !do_rotary) {
const int H = qk_head_size / 2;
const int H_v = v_head_size / 2;
const half2* input2 = reinterpret_cast<const half2*>(input);
const half2* biases2 = reinterpret_cast<const half2*>(biases);
half2* output2 = reinterpret_cast<half2*>(output);
half2* qkv_add_bias2 = reinterpret_cast<half2*>(qkv_add_bias);
InvokeAddBiasTranspose<half2>(stream, num_matrices, format, max_threads_per_block,
batch_size, sequence_length, num_heads, H, input2, biases2, output2,
qkv_add_bias2, H_v, total_matrix_count);
} else {
InvokeAddBiasTranspose<half>(
stream, num_matrices, format, max_threads_per_block,
batch_size, sequence_length, num_heads, qk_head_size, input, biases, output,
qkv_add_bias, v_head_size, total_matrix_count, do_rotary, original_past_sequence_length);
}
}
template <>
void LaunchAddBiasTranspose(
cudaStream_t stream, const int num_matrices, const int format, const int max_threads_per_block,
const int batch_size, const int sequence_length, const int num_heads, const int qk_head_size,
const float* input, const float* biases, float* output, bool /*enable_half4*/,
const int v_head_size, float* qkv_add_bias, int total_matrix_count, bool do_rotary,
int original_past_sequence_length) {
total_matrix_count = std::max(num_matrices, total_matrix_count);
if (0 == (qk_head_size % 4) && (v_head_size == -1 || 0 == (v_head_size % 4)) && !do_rotary) {
const int H = qk_head_size / 4;
const float4* input2 = reinterpret_cast<const float4*>(input);
const float4* biases2 = reinterpret_cast<const float4*>(biases);
float4* output2 = reinterpret_cast<float4*>(output);
float4* qkv_add_bias2 = reinterpret_cast<float4*>(qkv_add_bias);
InvokeAddBiasTranspose<float4>(
stream, num_matrices, format, max_threads_per_block,
batch_size, sequence_length, num_heads, H, input2, biases2, output2,
qkv_add_bias2, v_head_size / 4, total_matrix_count);
} else if (0 == (qk_head_size & 1) && (v_head_size == -1 || 0 == (v_head_size & 1)) && !do_rotary) {
const int H = qk_head_size / 2;
const float2* input2 = reinterpret_cast<const float2*>(input);
const float2* biases2 = reinterpret_cast<const float2*>(biases);
float2* output2 = reinterpret_cast<float2*>(output);
float2* qkv_add_bias2 = reinterpret_cast<float2*>(qkv_add_bias);
InvokeAddBiasTranspose<float2>(
stream, num_matrices, format, max_threads_per_block,
batch_size, sequence_length, num_heads, H, input2, biases2, output2,
qkv_add_bias2, v_head_size / 2, total_matrix_count);
} else {
InvokeAddBiasTranspose<float>(
stream, num_matrices, format, max_threads_per_block,
batch_size, sequence_length, num_heads, qk_head_size, input, biases, output,
qkv_add_bias, v_head_size, total_matrix_count, do_rotary, original_past_sequence_length);
}
}
template <typename T>
void InvokeAddBiasTransposeTrt(
cudaStream_t stream, const int max_threads_per_block,
const int batch_size, const int sequence_length, const int num_heads, const int head_size,
const T* biases, const T* query, const T* key, const T* value, T* output,
bool is_cross_attention, int kv_sequence_length) {
if (!is_cross_attention) {
ORT_ENFORCE(sequence_length == kv_sequence_length);
constexpr int num_matrices = 3;
const dim3 grid(sequence_length, batch_size, num_matrices);
if (head_size * num_heads <= max_threads_per_block) {
const dim3 block(head_size, num_heads, 1);
AddBiasTransposeTrt<T><<<grid, block, 0, stream>>>(query, key, value, biases, output);
} else {
const dim3 block(max_threads_per_block / num_heads, num_heads, 1);
AddBiasTransposeTrtLarge<T><<<grid, block, 0, stream>>>(head_size, query, key, value, biases, output);
}
} else { // cross attention
// Q: add bias
{
constexpr int num_matrices = 1;
const dim3 grid(sequence_length, batch_size, num_matrices);
if (head_size * num_heads <= max_threads_per_block) {
const dim3 block(head_size, num_heads, 1);
AddBiasTransposeTrt<T><<<grid, block, 0, stream>>>(query, biases, output);
} else {
const dim3 block(max_threads_per_block / num_heads, num_heads, 1);
AddBiasTransposeTrtLarge<T><<<grid, block, 0, stream>>>(head_size, query, biases, output);
}
}
// KV: add bias and pack kv
{
constexpr int num_matrices = 2;
const dim3 grid(kv_sequence_length, batch_size, num_matrices);
T* packed_kv = output + batch_size * sequence_length * num_heads * head_size;
if (head_size * num_heads <= max_threads_per_block) {
const dim3 block(head_size, num_heads, 1);
AddBiasTransposeTrtKV<T><<<grid, block, 0, stream>>>(key, value, biases, packed_kv);
} else {
const dim3 block(max_threads_per_block / num_heads, num_heads, 1);
AddBiasTransposeTrtKVLarge<T><<<grid, block, 0, stream>>>(head_size, key, value, biases, packed_kv);
}
}
}
}
template <>
void LaunchAddBiasTransposeTrt(
cudaStream_t stream, const int max_threads_per_block,
const int batch_size, const int sequence_length,
const int num_heads, const int head_size,
const float* biases, const float* query, const float* key, const float* value, float* output,
bool is_cross_attention, int kv_sequence_length) {
ORT_ENFORCE(false, "Shall not call this since fused kernel does not support float input.");
}
template <>
void LaunchAddBiasTransposeTrt(
cudaStream_t stream, const int max_threads_per_block,
const int batch_size, const int sequence_length,
const int num_heads, const int head_size,
const half* biases, const half* query, const half* key, const half* value, half* output,
bool is_cross_attention, int kv_sequence_length) {
if (0 == (head_size % 4)) {
const int H = head_size / 4;
const Half4* query2 = reinterpret_cast<const Half4*>(query);
const Half4* key2 = reinterpret_cast<const Half4*>(key);
const Half4* value2 = reinterpret_cast<const Half4*>(value);
const Half4* biases2 = reinterpret_cast<const Half4*>(biases);
Half4* output2 = reinterpret_cast<Half4*>(output);
InvokeAddBiasTransposeTrt<Half4>(stream, max_threads_per_block,
batch_size, sequence_length, num_heads, H,
biases2, query2, key2, value2, output2, is_cross_attention, kv_sequence_length);
} else if (0 == (head_size & 1)) {
const int H = head_size / 2;
const half2* query2 = reinterpret_cast<const half2*>(query);
const half2* key2 = reinterpret_cast<const half2*>(key);
const half2* value2 = reinterpret_cast<const half2*>(value);
const half2* biases2 = reinterpret_cast<const half2*>(biases);
half2* output2 = reinterpret_cast<half2*>(output);
InvokeAddBiasTransposeTrt<half2>(stream, max_threads_per_block,
batch_size, sequence_length, num_heads, H,
biases2, query2, key2, value2, output2, is_cross_attention, kv_sequence_length);
} else {
InvokeAddBiasTransposeTrt<half>(stream, max_threads_per_block,
batch_size, sequence_length, num_heads, head_size,
biases, query, key, value, output, is_cross_attention, kv_sequence_length);
}
}
template <typename T>
void InvokeAddBias(
cudaStream_t stream, const int max_threads_per_block,
const int batch_size, const int sequence_length, const int kv_sequence_length,
const int num_heads, const int head_size, const int v_head_size,
const T* biases, const T* query, const T* key, const T* value, T* q, T* k, T* v) {
assert(num_heads <= max_threads_per_block);
constexpr int num_matrices = 1;
// Q
{
const dim3 grid(sequence_length, batch_size, num_matrices);
if (head_size * num_heads <= max_threads_per_block) {
const dim3 block(head_size, num_heads, 1);
AddBiasTransposeTrt<T><<<grid, block, 0, stream>>>(query, biases, q);
} else {
const dim3 block(max_threads_per_block / num_heads, num_heads, 1);
AddBiasTransposeTrtLarge<T><<<grid, block, 0, stream>>>(head_size, query, biases, q);
}
}
// K
{
const dim3 grid(kv_sequence_length, batch_size, num_matrices);
const T* biases_k = biases + num_heads * head_size;
if (head_size * num_heads <= max_threads_per_block) {
const dim3 block(head_size, num_heads, 1);
AddBiasTransposeTrt<T><<<grid, block, 0, stream>>>(key, biases_k, k);
} else {
const dim3 block(max_threads_per_block / num_heads, num_heads, 1);
AddBiasTransposeTrtLarge<T><<<grid, block, 0, stream>>>(head_size, key, biases_k, k);
}
}
// V
{
const dim3 grid(kv_sequence_length, batch_size, num_matrices);
const T* biases_v = biases + 2 * num_heads * head_size;
if (v_head_size * num_heads <= max_threads_per_block) {
const dim3 block(v_head_size, num_heads, 1);
AddBiasTransposeTrt<T><<<grid, block, 0, stream>>>(value, biases_v, v);
} else {
const dim3 block(max_threads_per_block / num_heads, num_heads, 1);
AddBiasTransposeTrtLarge<T><<<grid, block, 0, stream>>>(v_head_size, value, biases_v, v);
}
}
}
template <>
void LaunchAddBias(
cudaStream_t stream, const int max_threads_per_block,
const int batch_size, const int sequence_length, const int kv_sequence_length,
const int num_heads, const int head_size, const int v_head_size,
const float* biases, const float* query, const float* key, const float* value, float* q, float* k, float* v) {
if (0 == (head_size % 4) && 0 == (v_head_size % 4)) {
const int H = head_size / 4;
const int H_v = v_head_size / 4;
const float4* query2 = reinterpret_cast<const float4*>(query);
const float4* key2 = reinterpret_cast<const float4*>(key);
const float4* value2 = reinterpret_cast<const float4*>(value);
const float4* biases2 = reinterpret_cast<const float4*>(biases);
float4* q2 = reinterpret_cast<float4*>(q);
float4* k2 = reinterpret_cast<float4*>(k);
float4* v2 = reinterpret_cast<float4*>(v);
InvokeAddBias<float4>(stream, max_threads_per_block,
batch_size, sequence_length, kv_sequence_length, num_heads, H, H_v,
biases2, query2, key2, value2, q2, k2, v2);
} else if (0 == (head_size & 1) && 0 == (v_head_size & 1)) {
const int H = head_size / 2;
const int H_v = v_head_size / 2;
const float2* query2 = reinterpret_cast<const float2*>(query);
const float2* key2 = reinterpret_cast<const float2*>(key);
const float2* value2 = reinterpret_cast<const float2*>(value);
const float2* biases2 = reinterpret_cast<const float2*>(biases);
float2* q2 = reinterpret_cast<float2*>(q);
float2* k2 = reinterpret_cast<float2*>(k);
float2* v2 = reinterpret_cast<float2*>(v);
InvokeAddBias<float2>(stream, max_threads_per_block,
batch_size, sequence_length, kv_sequence_length, num_heads, H, H_v,
biases2, query2, key2, value2, q2, k2, v2);
} else {
InvokeAddBias<float>(stream, max_threads_per_block,
batch_size, sequence_length, kv_sequence_length, num_heads, head_size, v_head_size,
biases, query, key, value, q, k, v);
}
}
template <>
void LaunchAddBias(
cudaStream_t stream, const int max_threads_per_block,
const int batch_size, const int sequence_length, const int kv_sequence_length,
const int num_heads, const int head_size, const int v_head_size,
const half* biases, const half* query, const half* key, const half* value, half* q, half* k, half* v) {
if (0 == (head_size % 4) && 0 == (v_head_size % 4)) {
const int H = head_size / 4;
const int H_v = v_head_size / 4;
const Half4* query2 = reinterpret_cast<const Half4*>(query);
const Half4* key2 = reinterpret_cast<const Half4*>(key);
const Half4* value2 = reinterpret_cast<const Half4*>(value);
const Half4* biases2 = reinterpret_cast<const Half4*>(biases);
Half4* q2 = reinterpret_cast<Half4*>(q);
Half4* k2 = reinterpret_cast<Half4*>(k);
Half4* v2 = reinterpret_cast<Half4*>(v);
InvokeAddBias<Half4>(stream, max_threads_per_block,
batch_size, sequence_length, kv_sequence_length, num_heads, H, H_v,
biases2, query2, key2, value2, q2, k2, v2);
} else if (0 == (head_size & 1) && 0 == (v_head_size & 1)) {
const int H = head_size / 2;
const int H_v = v_head_size / 2;
const half2* query2 = reinterpret_cast<const half2*>(query);
const half2* key2 = reinterpret_cast<const half2*>(key);
const half2* value2 = reinterpret_cast<const half2*>(value);
const half2* biases2 = reinterpret_cast<const half2*>(biases);
half2* q2 = reinterpret_cast<half2*>(q);
half2* k2 = reinterpret_cast<half2*>(k);
half2* v2 = reinterpret_cast<half2*>(v);
InvokeAddBias<half2>(stream, max_threads_per_block,
batch_size, sequence_length, kv_sequence_length, num_heads, H, H_v,
biases2, query2, key2, value2, q2, k2, v2);
} else {
InvokeAddBias<half>(stream, max_threads_per_block,
batch_size, sequence_length, kv_sequence_length, num_heads, head_size, v_head_size,
biases, query, key, value, q, k, v);
}
}
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
|
aecfa0ae2f85b61f9503cd204689689e964750a8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void k2_mul(float *data, float val) {
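  // One thread per element: scales data[threadIdx.x] by val in place (single-block launch assumed).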
data[threadIdx.x] *= val;
} | aecfa0ae2f85b61f9503cd204689689e964750a8.cu | #include "includes.h"
__global__ void k2_mul(float *data, float val) {
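  // One thread per element: scales data[threadIdx.x] by val in place (single-block launch assumed).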
data[threadIdx.x] *= val;
} |
77a741c868533093c48807afef37d6f014df2441.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
%%cu
#include<stdio.h>
#include<time.h>
void intarr(int* arr,int N);
void printarr(int* arr,int N);
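// Tree-style sum reduction (launched with one block of N/2 threads): each pass adds elements
// that are 'step' apart and halves the active thread count until arr[0] holds the total.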
__global__
void cal(int *arr,int N)
{
int tid = threadIdx.x;
int no_threads = blockDim.x;
//printf("tid %d\n",tid);
//printf("no %d\n",no_threads);
int step =1;
while(no_threads>0)
{
//printf("tid %d\n",tid);
if(tid<no_threads)
{
int f = tid*step*2;
int s = f + step;
arr[f] += arr[s];
}
        __syncthreads(); // make this pass's partial sums visible to all threads before the next pass
        no_threads>>=1;
step<<=1;
}
}
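// Same tree reduction, but keeps the larger element of each pair so arr[0] ends up with the maximum.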
__global__
void maxcal(int *arr,int N)
{
int tid = threadIdx.x;
int no_threads = blockDim.x;
//printf("tid %d\n",tid);
//printf("no %d\n",no_threads);
int step =1;
while(no_threads>0)
{
//printf("tid %d\n",tid);
if(tid<no_threads)
{
int f = tid*step*2;
int s = f + step;
if(arr[f]<arr[s])
arr[f] = arr[s];
}
        __syncthreads(); // make this pass's partial maxima visible to all threads before the next pass
        no_threads>>=1;
step<<=1;
}
}
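// Replaces each element with its squared deviation from avg; each thread updates two adjacent elements.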
__global__
void stdcal(int *arr,int N,int avg)
{
int tid = threadIdx.x;
int no_threads = blockDim.x;
//printf("tid %d\n",tid);
//printf("no %d\n",no_threads);
int step =1;
int f = tid*step*2;
int s = f + step;
arr[f] = (arr[f] - avg)*(arr[f] - avg);
arr[s] = (arr[s] - avg)*(arr[s] - avg);
}
int main()
{
srand(time(NULL));
int* a;
int* d_a;
const int N = 4;
    const int size = sizeof(int)*N; // bytes for N ints (sizeof(a) would be the pointer size)
a = (int *)malloc(size);
    intarr(a,N);
    int* orig = (int *)malloc(size);
    for(int i=0;i<N;i++) orig[i] = a[i]; // keep the original values; the reduction overwrites a
printf("Initial\n");
printarr(a,N);
hipMalloc(&d_a,size);
hipMemcpy(d_a,a,size,hipMemcpyHostToDevice);
hipLaunchKernelGGL(( cal), dim3(1),dim3(N/2), 0, 0, d_a,N);
hipMemcpy(a,d_a,size,hipMemcpyDeviceToHost);
int sum = a[0];
printf("Final sum %d\n",a[0]);
printf("Final avg %d\n",sum/N);
//maxcal<<<1,N/2>>>(d_a,N);
    hipMemcpy(d_a,orig,size,hipMemcpyHostToDevice); // restore the original values before computing squared deviations
hipLaunchKernelGGL(( stdcal), dim3(1),dim3(N/2), 0, 0, d_a,N,sum/N);
hipMemcpy(a,d_a,size,hipMemcpyDeviceToHost);
printf("Final\n");
printarr(a,N);
}
void intarr(int* arr,int N)
{
for(int i=0;i<N;i++)
{
arr[i] = rand()%N;
}
}
void printarr(int* arr,int N)
{
for(int i=0;i<N;i++)
{
printf("%d \n",arr[i]);
}
}
| 77a741c868533093c48807afef37d6f014df2441.cu | %%cu
#include<stdio.h>
#include<time.h>
void intarr(int* arr,int N);
void printarr(int* arr,int N);
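// Tree-style sum reduction (launched with one block of N/2 threads): each pass adds elements
// that are 'step' apart and halves the active thread count until arr[0] holds the total.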
__global__
void cal(int *arr,int N)
{
int tid = threadIdx.x;
int no_threads = blockDim.x;
//printf("tid %d\n",tid);
//printf("no %d\n",no_threads);
int step =1;
while(no_threads>0)
{
//printf("tid %d\n",tid);
if(tid<no_threads)
{
int f = tid*step*2;
int s = f + step;
arr[f] += arr[s];
}
        __syncthreads(); // make this pass's partial sums visible to all threads before the next pass
        no_threads>>=1;
step<<=1;
}
}
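// Same tree reduction, but keeps the larger element of each pair so arr[0] ends up with the maximum.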
__global__
void maxcal(int *arr,int N)
{
int tid = threadIdx.x;
int no_threads = blockDim.x;
//printf("tid %d\n",tid);
//printf("no %d\n",no_threads);
int step =1;
while(no_threads>0)
{
//printf("tid %d\n",tid);
if(tid<no_threads)
{
int f = tid*step*2;
int s = f + step;
if(arr[f]<arr[s])
arr[f] = arr[s];
}
        __syncthreads(); // make this pass's partial maxima visible to all threads before the next pass
        no_threads>>=1;
step<<=1;
}
}
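// Replaces each element with its squared deviation from avg; each thread updates two adjacent elements.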
__global__
void stdcal(int *arr,int N,int avg)
{
int tid = threadIdx.x;
int no_threads = blockDim.x;
//printf("tid %d\n",tid);
//printf("no %d\n",no_threads);
int step =1;
int f = tid*step*2;
int s = f + step;
arr[f] = (arr[f] - avg)*(arr[f] - avg);
arr[s] = (arr[s] - avg)*(arr[s] - avg);
}
int main()
{
srand(time(NULL));
int* a;
int* d_a;
const int N = 4;
    const int size = sizeof(int)*N; // bytes for N ints (sizeof(a) would be the pointer size)
a = (int *)malloc(size);
    intarr(a,N);
    int* orig = (int *)malloc(size);
    for(int i=0;i<N;i++) orig[i] = a[i]; // keep the original values; the reduction overwrites a
printf("Initial\n");
printarr(a,N);
cudaMalloc(&d_a,size);
cudaMemcpy(d_a,a,size,cudaMemcpyHostToDevice);
cal<<<1,N/2>>>(d_a,N);
cudaMemcpy(a,d_a,size,cudaMemcpyDeviceToHost);
int sum = a[0];
printf("Final sum %d\n",a[0]);
printf("Final avg %d\n",sum/N);
//maxcal<<<1,N/2>>>(d_a,N);
    cudaMemcpy(d_a,orig,size,cudaMemcpyHostToDevice); // restore the original values before computing squared deviations
stdcal<<<1,N/2>>>(d_a,N,sum/N);
cudaMemcpy(a,d_a,size,cudaMemcpyDeviceToHost);
printf("Final\n");
printarr(a,N);
}
void intarr(int* arr,int N)
{
for(int i=0;i<N;i++)
{
arr[i] = rand()%N;
}
}
void printarr(int* arr,int N)
{
for(int i=0;i<N;i++)
{
printf("%d \n",arr[i]);
}
}
|
9242c4f9fcba65df34e724f7484cc9660e6dad47.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/functional.h> // thrust::plus
#include <thrust/reduce.h>
#include <cmath>
#include "caffe/common.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <>
void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const float alpha, const float* A, const float* B, const float beta,
float* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
CUBLAS_CHECK(hipblasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const double alpha, const double* A, const double* B, const double beta,
double* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
CUBLAS_CHECK(hipblasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const float alpha, const float* A, const float* x,
const float beta, float* y) {
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
CUBLAS_CHECK(hipblasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const double alpha, const double* A, const double* x,
const double beta, double* y) {
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
CUBLAS_CHECK(hipblasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X,
float* Y) {
CUBLAS_CHECK(hipblasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template <>
void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X,
double* Y) {
CUBLAS_CHECK(hipblasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) {
if (X != Y) {
CUDA_CHECK(hipMemcpy(Y, X, N, hipMemcpyDefault)); // NOLINT(caffe/alt_fn)
}
}
template <>
void caffe_gpu_scal<float>(const int N, const float alpha, float *X) {
CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_scal<double>(const int N, const double alpha, double *X) {
CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X,
const float beta, float* Y) {
caffe_gpu_scal<float>(N, beta, Y);
caffe_gpu_axpy<float>(N, alpha, X, Y);
}
template <>
void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X,
const double beta, double* Y) {
caffe_gpu_scal<double>(N, beta, Y);
caffe_gpu_axpy<double>(N, alpha, X, Y);
}
template <>
void caffe_gpu_dot<float>(const int n, const float* x, const float* y,
float* out) {
CUBLAS_CHECK(hipblasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_dot<double>(const int n, const double* x, const double* y,
double * out) {
CUBLAS_CHECK(hipblasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_asum<float>(const int n, const float* x, float* y) {
CUBLAS_CHECK(hipblasSasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_asum<double>(const int n, const double* x, double* y) {
CUBLAS_CHECK(hipblasDasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_scale<float>(const int n, const float alpha, const float *x,
float* y) {
CUBLAS_CHECK(hipblasScopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <>
void caffe_gpu_scale<double>(const int n, const double alpha, const double *x,
double* y) {
CUBLAS_CHECK(hipblasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <typename Dtype>
__global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = alpha;
}
}
template <typename Dtype>
void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) {
if (alpha == 0) {
CUDA_CHECK(hipMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn)
return;
}
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( set_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
template void caffe_gpu_set<int>(const int N, const int alpha, int* Y);
template void caffe_gpu_set<float>(const int N, const float alpha, float* Y);
template void caffe_gpu_set<double>(const int N, const double alpha, double* Y);
template <typename Dtype>
__global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] += alpha;
}
}
template <>
void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_scalar_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
template <>
void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_scalar_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
template <typename Dtype>
__global__ void add_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] + b[index];
}
}
template <>
void caffe_gpu_add<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_add<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void sub_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] - b[index];
}
}
template <>
void caffe_gpu_sub<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sub_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_sub<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sub_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void mul_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] * b[index];
}
}
template <>
void caffe_gpu_mul<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( mul_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_mul<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( mul_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void div_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] / b[index];
}
}
template <>
void caffe_gpu_div<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( div_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_div<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( div_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = abs(a[index]);
}
}
template <>
void caffe_gpu_abs<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( abs_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_abs<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( abs_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <typename Dtype>
__global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = exp(a[index]);
}
}
template <>
void caffe_gpu_exp<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( exp_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_exp<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( exp_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <typename Dtype>
__global__ void log_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = log(a[index]);
}
}
template <>
void caffe_gpu_log<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( log_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_log<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( log_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <typename Dtype>
__global__ void powx_kernel(const int n, const Dtype* a,
const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = pow(a[index], alpha);
}
}
template <>
void caffe_gpu_powx<float>(const int N, const float* a,
const float alpha, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( powx_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, alpha, y);
}
template <>
void caffe_gpu_powx<double>(const int N, const double* a,
const double alpha, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( powx_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, alpha, y);
}
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index])
- (x[index] < Dtype(0)));
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index]));
void caffe_gpu_rng_uniform(const int n, unsigned int* r) {
CURAND_CHECK(hiprandGenerate(Caffe::curand_generator(), r, n));
}
template <>
void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b,
float* r) {
CURAND_CHECK(hiprandGenerateUniform(Caffe::curand_generator(), r, n));
const float range = b - a;
if (range != static_cast<float>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<float>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b,
double* r) {
CURAND_CHECK(hiprandGenerateUniformDouble(Caffe::curand_generator(), r, n));
const double range = b - a;
if (range != static_cast<double>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<double>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma,
float* r) {
CURAND_CHECK(
hiprandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma));
}
template <>
void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma,
double* r) {
CURAND_CHECK(
hiprandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma));
}
// CUSTOM -------------------
template <typename Dtype>
__global__ void zcaffe_gpu_safeinv_kernel(const int n, const Dtype* in, Dtype* out, const Dtype numerator) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = (in[index] == Dtype(0.)) ? 0: (numerator/in[index]);
}
}
template <typename Dtype>
void zcaffe_gpu_safeinv( const int n, const Dtype* in, Dtype* out, const Dtype numerator ) {
hipLaunchKernelGGL(( zcaffe_gpu_safeinv_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
n, in, out, numerator);
}
template void zcaffe_gpu_safeinv<float>( const int n, const float* in, float* out, const float numerator );
template void zcaffe_gpu_safeinv<double>( const int n, const double* in, double* out, const double numerator );
template <typename Dtype>
__global__ void zcaffe_gpu_blockcopy_kernel(const int n,
const int block_size, const int in_stride, const Dtype* in,
const int out_stride, Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
int b = index / block_size;
int o = index % block_size;
out[b*out_stride+o] = in[b*in_stride+o];
}
}
template <typename Dtype>
void zcaffe_gpu_blockcopy( const int num_block,
const int block_size, const int in_stride, const Dtype* in,
const int out_stride, Dtype* out ) {
const int n = num_block*block_size;
hipLaunchKernelGGL(( zcaffe_gpu_blockcopy_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
n, block_size, in_stride, in, out_stride, out);
}
template void zcaffe_gpu_blockcopy<unsigned int>( const int num_block,
const int block_size, const int in_stride, const unsigned int* in,
const int out_stride, unsigned int* out );
template void zcaffe_gpu_blockcopy<int>( const int num_block,
const int block_size, const int in_stride, const int* in,
const int out_stride, int* out );
template void zcaffe_gpu_blockcopy<float>( const int num_block,
const int block_size, const int in_stride, const float* in,
const int out_stride, float* out );
template void zcaffe_gpu_blockcopy<double>( const int num_block,
const int block_size, const int in_stride, const double* in,
const int out_stride, double* out );
template <typename Dtype>
__global__ void zcaffe_gpu_repmul_kernel(const int n,
const int block, const Dtype* a, const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index]=a[index%block]*b[index];
}
}
template <typename Dtype>
__global__ void zcaffe_gpu_repmul_py_kernel(const int n,
const int block, const Dtype* a, const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index]+=a[index%block]*b[index];
}
}
template <typename Dtype>
void zcaffe_gpu_repmul( const int block,
const Dtype* a, const int iter, const Dtype* b, Dtype* y, bool is_py ) {
const int n = block*iter;
if (is_py)
hipLaunchKernelGGL(( zcaffe_gpu_repmul_py_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
n, block, a, b, y );
else
hipLaunchKernelGGL(( zcaffe_gpu_repmul_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
n, block, a, b, y );
}
template void zcaffe_gpu_repmul<float>(
const int block, const float* a, const int iter, const float* b, float* y, bool is_py );
template void zcaffe_gpu_repmul<double>(
const int block, const double* a, const int iter, const double* b, double* y, bool is_py );
template <typename Dtype>
__global__ void zcaffe_gpu_blockaxpy_kernel(const int n,
const int block_size, const int x_stride, const Dtype alpha, const Dtype* x,
const int y_stride, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
int b = index / block_size;
int o = index % block_size;
y[b*y_stride+o] += alpha*x[b*x_stride+o];
}
}
template <typename Dtype>
void zcaffe_gpu_blockaxpy( const int num_block,
const int block_size, const int x_stride, const Dtype alpha, const Dtype* x,
const int y_stride, Dtype* y ) {
const int n = num_block*block_size;
hipLaunchKernelGGL(( zcaffe_gpu_blockaxpy_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
n, block_size, x_stride, alpha, x, y_stride, y);
}
template void zcaffe_gpu_blockaxpy<float>( const int num_block,
const int block_size, const int x_stride, const float alpha, const float* x,
const int y_stride, float* y );
template void zcaffe_gpu_blockaxpy<double>( const int num_block,
const int block_size, const int x_stride, const double alpha, const double* x,
const int y_stride, double* y );
// Reordering -----------------------------------------
template <int> struct zcaffe_gpu_reorder_func {
static bool skip( const int* dims ) { return false; }
private:
static int reorder( const int n, const int* dims );
static int dim();
};
template<>
struct zcaffe_gpu_reorder_func<zcaffe_reorder::D3S01> {
static __device__ int reorder( const int n, const int* dims ) {
int n0, n1, n2;
n1 = n /dims[2];
n0 = n1/dims[1];
n2 = n %dims[2];
n1 = n1%dims[1];
// 1 0 2
return (n1*dims[0]+n0)*dims[2]+n2;
}
static bool skip( const int* dims ) { return (dims[0]==1 || dims[1]==1); }
static int dim() { return 3; };
};
template<>
struct zcaffe_gpu_reorder_func<zcaffe_reorder::D3S12> {
static __device__ int reorder( const int n, const int* dims ) {
int n0, n1, n2;
n1 = n /dims[2];
n0 = n1/dims[1];
n2 = n %dims[2];
n1 = n1%dims[1];
// 0 2 1
return (n0*dims[2]+n2)*dims[1]+n1;
}
static bool skip( const int* dims ) { return (dims[1]==1 || dims[2]==1); }
static int dim() { return 3; };
};
template<>
struct zcaffe_gpu_reorder_func<zcaffe_reorder::D3S02> {
static __device__ int reorder( const int n, const int* dims ) {
int n0, n1, n2;
n1 = n /dims[2];
n0 = n1/dims[1];
n2 = n %dims[2];
n1 = n1%dims[1]; /* middle coordinate stays n1, mirroring D3S01/D3S12 */
// 2 1 0
return (n2*dims[1]+n1)*dims[0]+n0;
}
static bool skip( const int* dims ) { return (dims[1]==1 || (dims[0]==1 && dims[2]==1)); }
static int dim() { return 3; };
};
template <typename Dtype, int ReorderType>
__global__ void zcaffe_gpu_reordering_kernel(const int n,
const int* dims, const Dtype* A, Dtype*B) {
typedef zcaffe_gpu_reorder_func<ReorderType> reorder_func;
CUDA_KERNEL_LOOP(index, n) {
int j =reorder_func::reorder(index,dims);
B[j] = A[index];
}
}
template <typename Dtype, int ReorderType >
void zcaffe_gpu_reordering( const int* dims, const Dtype* A, Dtype*B ) {
if (A==B)
LOG(WARNING) << "zcaffe_gpu_reordering: Do not support in-place reordering";
typedef zcaffe_gpu_reorder_func<ReorderType> reorder_func;
const int dimN = reorder_func::dim();
int n = 1;
for ( int i=0; i<dimN; ++i )
n *= dims[i];
if ( reorder_func::skip( dims ) ) {
caffe_copy( n, A, B );
} else {
thrust::host_vector<int> H(dims,dims+dimN);
thrust::device_vector<int> D = H;
hipLaunchKernelGGL(( zcaffe_gpu_reordering_kernel<Dtype,ReorderType>), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
n, thrust::raw_pointer_cast(D.data()), A, B );
}
}
#define INSTANTIATE_GPU_REORDERING(n) \
template void zcaffe_gpu_reordering<float,n>( const int* dims, const float* A, float* B ); \
template void zcaffe_gpu_reordering<double,n>( const int* dims, const double* A, double* B );
INSTANTIATE_GPU_REORDERING(zcaffe_reorder::D3S01);
INSTANTIATE_GPU_REORDERING(zcaffe_reorder::D3S12);
INSTANTIATE_GPU_REORDERING(zcaffe_reorder::D3S02);
// -----------------------------------------------------
} // namespace caffe
| 9242c4f9fcba65df34e724f7484cc9660e6dad47.cu | #include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/functional.h> // thrust::plus
#include <thrust/reduce.h>
#include <cmath>
#include "caffe/common.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <>
void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const float alpha, const float* A, const float* B, const float beta,
float* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_CHECK(cublasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const double alpha, const double* A, const double* B, const double beta,
double* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_CHECK(cublasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const float alpha, const float* A, const float* x,
const float beta, float* y) {
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
CUBLAS_CHECK(cublasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const double alpha, const double* A, const double* x,
const double beta, double* y) {
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
CUBLAS_CHECK(cublasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X,
float* Y) {
CUBLAS_CHECK(cublasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template <>
void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X,
double* Y) {
CUBLAS_CHECK(cublasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) {
if (X != Y) {
CUDA_CHECK(cudaMemcpy(Y, X, N, cudaMemcpyDefault)); // NOLINT(caffe/alt_fn)
}
}
template <>
void caffe_gpu_scal<float>(const int N, const float alpha, float *X) {
CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_scal<double>(const int N, const double alpha, double *X) {
CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X,
const float beta, float* Y) {
caffe_gpu_scal<float>(N, beta, Y);
caffe_gpu_axpy<float>(N, alpha, X, Y);
}
template <>
void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X,
const double beta, double* Y) {
caffe_gpu_scal<double>(N, beta, Y);
caffe_gpu_axpy<double>(N, alpha, X, Y);
}
template <>
void caffe_gpu_dot<float>(const int n, const float* x, const float* y,
float* out) {
CUBLAS_CHECK(cublasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_dot<double>(const int n, const double* x, const double* y,
double * out) {
CUBLAS_CHECK(cublasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_asum<float>(const int n, const float* x, float* y) {
CUBLAS_CHECK(cublasSasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_asum<double>(const int n, const double* x, double* y) {
CUBLAS_CHECK(cublasDasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_scale<float>(const int n, const float alpha, const float *x,
float* y) {
CUBLAS_CHECK(cublasScopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <>
void caffe_gpu_scale<double>(const int n, const double alpha, const double *x,
double* y) {
CUBLAS_CHECK(cublasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <typename Dtype>
__global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = alpha;
}
}
template <typename Dtype>
void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) {
if (alpha == 0) {
CUDA_CHECK(cudaMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn)
return;
}
// NOLINT_NEXT_LINE(whitespace/operators)
set_kernel<Dtype><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template void caffe_gpu_set<int>(const int N, const int alpha, int* Y);
template void caffe_gpu_set<float>(const int N, const float alpha, float* Y);
template void caffe_gpu_set<double>(const int N, const double alpha, double* Y);
template <typename Dtype>
__global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] += alpha;
}
}
template <>
void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_scalar_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template <>
void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_scalar_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template <typename Dtype>
__global__ void add_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] + b[index];
}
}
template <>
void caffe_gpu_add<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_add<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void sub_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] - b[index];
}
}
template <>
void caffe_gpu_sub<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
sub_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_sub<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
sub_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void mul_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] * b[index];
}
}
template <>
void caffe_gpu_mul<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
mul_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_mul<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
mul_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void div_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] / b[index];
}
}
template <>
void caffe_gpu_div<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
div_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_div<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
div_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = abs(a[index]);
}
}
template <>
void caffe_gpu_abs<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
abs_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_abs<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
abs_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <typename Dtype>
__global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = exp(a[index]);
}
}
template <>
void caffe_gpu_exp<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
exp_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_exp<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
exp_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <typename Dtype>
__global__ void log_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = log(a[index]);
}
}
template <>
void caffe_gpu_log<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
log_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_log<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
log_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <typename Dtype>
__global__ void powx_kernel(const int n, const Dtype* a,
const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = pow(a[index], alpha);
}
}
template <>
void caffe_gpu_powx<float>(const int N, const float* a,
const float alpha, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
powx_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, alpha, y);
}
template <>
void caffe_gpu_powx<double>(const int N, const double* a,
const double alpha, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
powx_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, alpha, y);
}
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index])
- (x[index] < Dtype(0)));
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index]));
void caffe_gpu_rng_uniform(const int n, unsigned int* r) {
CURAND_CHECK(curandGenerate(Caffe::curand_generator(), r, n));
}
template <>
void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b,
float* r) {
CURAND_CHECK(curandGenerateUniform(Caffe::curand_generator(), r, n));
const float range = b - a;
if (range != static_cast<float>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<float>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b,
double* r) {
CURAND_CHECK(curandGenerateUniformDouble(Caffe::curand_generator(), r, n));
const double range = b - a;
if (range != static_cast<double>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<double>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma,
float* r) {
CURAND_CHECK(
curandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma));
}
template <>
void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma,
double* r) {
CURAND_CHECK(
curandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma));
}
// CUSTOM -------------------
template <typename Dtype>
__global__ void zcaffe_gpu_safeinv_kernel(const int n, const Dtype* in, Dtype* out, const Dtype numerator) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = (in[index] == Dtype(0.)) ? 0: (numerator/in[index]);
}
}
template <typename Dtype>
void zcaffe_gpu_safeinv( const int n, const Dtype* in, Dtype* out, const Dtype numerator ) {
zcaffe_gpu_safeinv_kernel<Dtype><<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS>>>(
n, in, out, numerator);
}
template void zcaffe_gpu_safeinv<float>( const int n, const float* in, float* out, const float numerator );
template void zcaffe_gpu_safeinv<double>( const int n, const double* in, double* out, const double numerator );
template <typename Dtype>
__global__ void zcaffe_gpu_blockcopy_kernel(const int n,
const int block_size, const int in_stride, const Dtype* in,
const int out_stride, Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
int b = index / block_size;
int o = index % block_size;
out[b*out_stride+o] = in[b*in_stride+o];
}
}
template <typename Dtype>
void zcaffe_gpu_blockcopy( const int num_block,
const int block_size, const int in_stride, const Dtype* in,
const int out_stride, Dtype* out ) {
const int n = num_block*block_size;
zcaffe_gpu_blockcopy_kernel<Dtype><<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS>>>(
n, block_size, in_stride, in, out_stride, out);
}
template void zcaffe_gpu_blockcopy<unsigned int>( const int num_block,
const int block_size, const int in_stride, const unsigned int* in,
const int out_stride, unsigned int* out );
template void zcaffe_gpu_blockcopy<int>( const int num_block,
const int block_size, const int in_stride, const int* in,
const int out_stride, int* out );
template void zcaffe_gpu_blockcopy<float>( const int num_block,
const int block_size, const int in_stride, const float* in,
const int out_stride, float* out );
template void zcaffe_gpu_blockcopy<double>( const int num_block,
const int block_size, const int in_stride, const double* in,
const int out_stride, double* out );
template <typename Dtype>
__global__ void zcaffe_gpu_repmul_kernel(const int n,
const int block, const Dtype* a, const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index]=a[index%block]*b[index];
}
}
template <typename Dtype>
__global__ void zcaffe_gpu_repmul_py_kernel(const int n,
const int block, const Dtype* a, const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index]+=a[index%block]*b[index];
}
}
template <typename Dtype>
void zcaffe_gpu_repmul( const int block,
const Dtype* a, const int iter, const Dtype* b, Dtype* y, bool is_py ) {
const int n = block*iter;
if (is_py)
zcaffe_gpu_repmul_py_kernel<Dtype><<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS>>>(
n, block, a, b, y );
else
zcaffe_gpu_repmul_kernel<Dtype><<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS>>>(
n, block, a, b, y );
}
template void zcaffe_gpu_repmul<float>(
const int block, const float* a, const int iter, const float* b, float* y, bool is_py );
template void zcaffe_gpu_repmul<double>(
const int block, const double* a, const int iter, const double* b, double* y, bool is_py );
template <typename Dtype>
__global__ void zcaffe_gpu_blockaxpy_kernel(const int n,
const int block_size, const int x_stride, const Dtype alpha, const Dtype* x,
const int y_stride, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
int b = index / block_size;
int o = index % block_size;
y[b*y_stride+o] += alpha*x[b*x_stride+o];
}
}
template <typename Dtype>
void zcaffe_gpu_blockaxpy( const int num_block,
const int block_size, const int x_stride, const Dtype alpha, const Dtype* x,
const int y_stride, Dtype* y ) {
const int n = num_block*block_size;
zcaffe_gpu_blockaxpy_kernel<Dtype><<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS>>>(
n, block_size, x_stride, alpha, x, y_stride, y);
}
template void zcaffe_gpu_blockaxpy<float>( const int num_block,
const int block_size, const int x_stride, const float alpha, const float* x,
const int y_stride, float* y );
template void zcaffe_gpu_blockaxpy<double>( const int num_block,
const int block_size, const int x_stride, const double alpha, const double* x,
const int y_stride, double* y );
// Reordering -----------------------------------------
template <int> struct zcaffe_gpu_reorder_func {
static bool skip( const int* dims ) { return false; }
private:
static int reorder( const int n, const int* dims );
static int dim();
};
template<>
struct zcaffe_gpu_reorder_func<zcaffe_reorder::D3S01> {
static __device__ int reorder( const int n, const int* dims ) {
int n0, n1, n2;
n1 = n /dims[2];
n0 = n1/dims[1];
n2 = n %dims[2];
n1 = n1%dims[1];
// 1 0 2
return (n1*dims[0]+n0)*dims[2]+n2;
}
static bool skip( const int* dims ) { return (dims[0]==1 || dims[1]==1); }
static int dim() { return 3; };
};
template<>
struct zcaffe_gpu_reorder_func<zcaffe_reorder::D3S12> {
static __device__ int reorder( const int n, const int* dims ) {
int n0, n1, n2;
n1 = n /dims[2];
n0 = n1/dims[1];
n2 = n %dims[2];
n1 = n1%dims[1];
// 0 2 1
return (n0*dims[2]+n2)*dims[1]+n1;
}
static bool skip( const int* dims ) { return (dims[1]==1 || dims[2]==1); }
static int dim() { return 3; };
};
template<>
struct zcaffe_gpu_reorder_func<zcaffe_reorder::D3S02> {
static __device__ int reorder( const int n, const int* dims ) {
int n0, n1, n2;
n1 = n /dims[2];
n0 = n1/dims[1];
n2 = n %dims[2];
n1 = n1%dims[1]; /* middle coordinate stays n1, mirroring D3S01/D3S12 */
// 2 1 0
return (n2*dims[1]+n1)*dims[0]+n0;
}
static bool skip( const int* dims ) { return (dims[1]==1 || (dims[0]==1 && dims[2]==1)); }
static int dim() { return 3; };
};
template <typename Dtype, int ReorderType>
__global__ void zcaffe_gpu_reordering_kernel(const int n,
const int* dims, const Dtype* A, Dtype*B) {
typedef zcaffe_gpu_reorder_func<ReorderType> reorder_func;
CUDA_KERNEL_LOOP(index, n) {
int j =reorder_func::reorder(index,dims);
B[j] = A[index];
}
}
template <typename Dtype, int ReorderType >
void zcaffe_gpu_reordering( const int* dims, const Dtype* A, Dtype*B ) {
if (A==B)
LOG(WARNING) << "zcaffe_gpu_reordering: Do not support in-place reordering";
typedef zcaffe_gpu_reorder_func<ReorderType> reorder_func;
const int dimN = reorder_func::dim();
int n = 1;
for ( int i=0; i<dimN; ++i )
n *= dims[i];
if ( reorder_func::skip( dims ) ) {
caffe_copy( n, A, B );
} else {
thrust::host_vector<int> H(dims,dims+dimN);
thrust::device_vector<int> D = H;
zcaffe_gpu_reordering_kernel<Dtype,ReorderType><<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS>>>(
n, thrust::raw_pointer_cast(D.data()), A, B );
}
}
#define INSTANTIATE_GPU_REORDERING(n) \
template void zcaffe_gpu_reordering<float,n>( const int* dims, const float* A, float* B ); \
template void zcaffe_gpu_reordering<double,n>( const int* dims, const double* A, double* B );
INSTANTIATE_GPU_REORDERING(zcaffe_reorder::D3S01);
INSTANTIATE_GPU_REORDERING(zcaffe_reorder::D3S12);
INSTANTIATE_GPU_REORDERING(zcaffe_reorder::D3S02);
// -----------------------------------------------------
} // namespace caffe
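/* Editor's illustration, not part of the original file: the D3S01 functor above maps a
   dims[0] x dims[1] x dims[2] tensor onto a dims[1] x dims[0] x dims[2] layout. The same
   index arithmetic as a standalone host helper (name is made up), handy for checking a
   concrete case: */
static inline int d3s01_target_index(int n, const int dims[3])
{
    int n1 = n / dims[2];
    int n0 = n1 / dims[1];
    int n2 = n % dims[2];
    n1 = n1 % dims[1];
    return (n1 * dims[0] + n0) * dims[2] + n2;  /* axes reordered as 1,0,2 */
}
/* With dims = {2,3,4}, linear index 11 decomposes to (n0,n1,n2) = (0,2,3) and lands at
   (2*2 + 0)*4 + 3 = 19 in the permuted layout. */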
|
67ec7f704d267f6048592f7fd3ccdca1a27af4c8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void d_count_kernel(unsigned int * d_pivots, int * r_buckets, int pivotsLength, unsigned int * r_indices, unsigned int * r_sublist, unsigned int * d_in, int itemCount) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < itemCount) {
unsigned int element = d_in[idx];
unsigned int index = pivotsLength/2 - 1;
unsigned int jump = pivotsLength/4;
int pivot = d_pivots[index];
while(jump >= 1) {
index = (element < pivot) ? (index - jump) : (index + jump);
pivot = d_pivots[index];
jump /= 2;
}
index = (element < pivot) ? index : index + 1;
r_sublist[idx] = index;
r_indices[idx] = atomicAdd(&r_buckets[index], 1);
}
} | 67ec7f704d267f6048592f7fd3ccdca1a27af4c8.cu | #include "includes.h"
__global__ void d_count_kernel(unsigned int * d_pivots, int * r_buckets, int pivotsLength, unsigned int * r_indices, unsigned int * r_sublist, unsigned int * d_in, int itemCount) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < itemCount) {
unsigned int element = d_in[idx];
unsigned int index = pivotsLength/2 - 1;
unsigned int jump = pivotsLength/4;
int pivot = d_pivots[index];
while(jump >= 1) {
index = (element < pivot) ? (index - jump) : (index + jump);
pivot = d_pivots[index];
jump /= 2;
}
index = (element < pivot) ? index : index + 1;
r_sublist[idx] = index;
r_indices[idx] = atomicAdd(&r_buckets[index], 1);
}
} |
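/* Editor's sketch, not part of either file above: a possible host-side launch for
   d_count_kernel, assuming r_buckets is zeroed first and pivotsLength is a power of two
   so the halving jump walks a complete binary-search tree. All names are illustrative. */
void launch_d_count(unsigned int* d_pivots, int* d_buckets, int pivotsLength,
                    unsigned int* d_indices, unsigned int* d_sublist,
                    unsigned int* d_in, int itemCount)
{
    const int threads = 256;
    const int blocks = (itemCount + threads - 1) / threads;  /* one thread per input item */
    cudaMemset(d_buckets, 0, pivotsLength * sizeof(int));    /* counts start from zero */
    d_count_kernel<<<blocks, threads>>>(d_pivots, d_buckets, pivotsLength,
                                        d_indices, d_sublist, d_in, itemCount);
}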
025fdbf1fa848e4883d4655bf4b66a7b4e6f3c98.hip | // !!! This is a file automatically generated by hipify!!!
#include "THHTensorTypeUtils.cuh"
#include "THHTensor.h"
#include "THHTensorCopy.h"
#include "THHHalf.h"
#include <stdlib.h>
namespace {
struct SizeAndStride {
int64_t size;
int64_t stride;
};
int compareSizeAndStride(const void* a, const void* b) {
const SizeAndStride* aS = (const SizeAndStride*) a;
const SizeAndStride* bS = (const SizeAndStride*) b;
return aS->stride < bS->stride;
}
}
#define IMPL_TENSOR_UTILS(TENSOR_TYPE, DATA_TYPE) \
\
TENSOR_TYPE* \
TensorUtils<TENSOR_TYPE>::newTensor(THCState* state) { \
return TENSOR_TYPE##_new(state); \
} \
\
TENSOR_TYPE* \
TensorUtils<TENSOR_TYPE>::newContiguous(THCState* state, \
TENSOR_TYPE* t) { \
return TENSOR_TYPE##_newContiguous(state, t); \
} \
\
THLongStorage* \
TensorUtils<TENSOR_TYPE>::newSizeOf(THCState* state, \
TENSOR_TYPE* t) { \
return TENSOR_TYPE##_newSizeOf(state, t); \
} \
\
void \
TensorUtils<TENSOR_TYPE>::retain(THCState* state, \
TENSOR_TYPE* t) { \
TENSOR_TYPE##_retain(state, t); \
} \
\
void \
TensorUtils<TENSOR_TYPE>::free(THCState* state, \
TENSOR_TYPE* t) { \
TENSOR_TYPE##_free(state, t); \
} \
\
void \
TensorUtils<TENSOR_TYPE>::freeCopyTo(THCState* state, \
TENSOR_TYPE* src, \
TENSOR_TYPE* dst) { \
TENSOR_TYPE##_freeCopyTo(state, src, dst); \
} \
\
void \
TensorUtils<TENSOR_TYPE>::resize(THCState* state, \
TENSOR_TYPE* out, \
THLongStorage* sizes, \
THLongStorage* strides) { \
TENSOR_TYPE##_resize(state, out, sizes, strides); \
} \
\
void \
TensorUtils<TENSOR_TYPE>::resizeAs(THCState* state, \
TENSOR_TYPE* dst, \
TENSOR_TYPE* src) { \
TENSOR_TYPE##_resizeAs(state, dst, src); \
} \
\
void \
TensorUtils<TENSOR_TYPE>::squeeze1d(THCState *state, \
TENSOR_TYPE *dst, \
TENSOR_TYPE *src, \
int dimension) { \
TENSOR_TYPE##_squeeze1d(state, dst, src, dimension); \
} \
\
void \
TensorUtils<TENSOR_TYPE>::unsqueeze1d(THCState *state, \
TENSOR_TYPE *dst, \
TENSOR_TYPE *src, \
int dimension) { \
TENSOR_TYPE##_unsqueeze1d(state, dst, src, dimension); \
} \
\
DATA_TYPE* \
TensorUtils<TENSOR_TYPE>::getData(THCState* state, \
TENSOR_TYPE* t) { \
/* FIXME: no cast is required except for THCudaHalfTensor */ \
return (DATA_TYPE*) TENSOR_TYPE##_data(state, t); \
} \
\
ptrdiff_t \
TensorUtils<TENSOR_TYPE>::getNumElements(THCState* state, \
TENSOR_TYPE* t) { \
return TENSOR_TYPE##_nElement(state, t); \
} \
\
int64_t \
TensorUtils<TENSOR_TYPE>::getSize(THCState* state, \
TENSOR_TYPE* t, \
int dim) { \
return TENSOR_TYPE##_size(state, t, dim); \
} \
\
int64_t \
TensorUtils<TENSOR_TYPE>::getStride(THCState* state, \
TENSOR_TYPE* t, \
int dim) { \
return TENSOR_TYPE##_stride(state, t, dim); \
} \
\
int \
TensorUtils<TENSOR_TYPE>::getDims(THCState* state, \
TENSOR_TYPE* t) { \
return TENSOR_TYPE##_nDimension(state, t); \
} \
\
bool \
TensorUtils<TENSOR_TYPE>::isContiguous(THCState* state, \
TENSOR_TYPE* t) { \
return TENSOR_TYPE##_isContiguous(state, t); \
} \
\
bool \
TensorUtils<TENSOR_TYPE>::allContiguous(THCState* state, \
TENSOR_TYPE** inputs, \
int numInputs) { \
THAssert(numInputs > 0); \
for (int i = 0; i < numInputs; ++i) { \
if (!TensorUtils<TENSOR_TYPE>::isContiguous(state, inputs[i])) { \
return false; \
} \
} \
return true; \
} \
\
/* Due to the resize semantics of ops with `out=` keywords, if */ \
/* the output `tensor` has the same shape as the output of the */ \
/* reduction operation, then any noncontiguities in the output */ \
/* `tensor` should be preserved. This needs to be special cased b/c */ \
/* otherwise, when keepdim=False, the implementations of reduction */ \
/* ops resize `tensor` to the reduced size with keepdim=True, and */ \
/* then later squeeze `tensor` to the correct output size, breaking */ \
/* the contiguity guarantees of the resize semantics. */ \
void \
TensorUtils<TENSOR_TYPE>::preserveReduceDimSemantics( \
THCState *state, TENSOR_TYPE *tensor, \
int in_dims, int64_t dimension, int keepdim) {\
int out_dims = TensorUtils<TENSOR_TYPE>::getDims(state, tensor); \
if (out_dims > 0 && !keepdim && out_dims == in_dims - 1) { \
TensorUtils<TENSOR_TYPE>::unsqueeze1d(state, tensor, tensor, dimension);\
} \
} \
\
int \
TensorUtils<TENSOR_TYPE>::getDevice(THCState* state, \
TENSOR_TYPE* t) { \
return TENSOR_TYPE##_getDevice(state, t); \
} \
\
bool \
TensorUtils<TENSOR_TYPE>::allSameDevice(THCState* state, \
TENSOR_TYPE** inputs, \
int numInputs) { \
THAssert(numInputs > 0); \
int device = TensorUtils<TENSOR_TYPE>::getDevice(state, inputs[0]); \
for (int i = 1; i < numInputs; ++i) { \
if (TensorUtils<TENSOR_TYPE>::getDevice(state, inputs[i]) != device) { \
return false; \
} \
} \
return true; \
} \
\
void \
TensorUtils<TENSOR_TYPE>::copyIgnoringOverlaps(THCState* state, \
TENSOR_TYPE* dst, \
TENSOR_TYPE* src) { \
return TENSOR_TYPE##_copyIgnoringOverlaps(state, dst, src); \
} \
\
bool \
TensorUtils<TENSOR_TYPE>::overlappingIndices(THCState* state, \
TENSOR_TYPE* t) { \
/* In this function, we don't care about permutations of the */ \
/* size/stride arrays (transpositions). */ \
/* We order the size/stride arrays by stride, skipping dimensions */ \
/* of size 1. Strides of dimensions of size 1 don't matter, since */ \
/* there is only one addressing point in them. */ \
/* In this reordered view, the tensor is contiguous if */ \
/* stride[dim] == size[dim + 1] * stride[dim + 1] for all `dim`. */ \
/* The tensor has holes if */ \
/* stride[dim] > size[dim + 1] * stride[dim + 1] for one or more */ \
/* `dim`. */ \
/* The tensor has overlaps if */ \
/* stride[dim] < size[dim + 1] * stride[dim + 1] for one or more */ \
/* `dim`, or the innermost stride is 0. */ \
\
/* Extract size/stride arrays; only consider size >1 dims. */ \
SizeAndStride info[MAX_CUTORCH_DIMS]; \
\
int dims = TensorUtils<TENSOR_TYPE>::getDims(state, t); \
int nonSize1Dims = 0; \
for (int i = 0; i < dims; ++i) { \
int64_t size = TensorUtils<TENSOR_TYPE>::getSize(state, t, i); \
if (size > 1) { \
info[nonSize1Dims].size = size; \
info[nonSize1Dims].stride = \
TensorUtils<TENSOR_TYPE>::getStride(state, t, i); \
++nonSize1Dims; \
} \
} \
\
if (nonSize1Dims == 0) { \
/* no overlap */ \
return false; \
} \
\
/* Ascending order (innermost dimension in sorted view is at [0]) */ \
qsort(info, nonSize1Dims, sizeof(SizeAndStride), compareSizeAndStride); \
\
/* Base case: innermost dimension must have stride >= 1 */ \
if (info[nonSize1Dims - 1].stride < 1) { \
return true; \
} \
\
/* Subsequent dimensions, if any */ \
for (int i = nonSize1Dims - 2; i >= 0; --i) { \
if (info[i].stride < info[i + 1].size * info[i + 1].stride) { \
/* There are overlaps */ \
return true; \
} \
} \
\
/* Tensor has holes or is contiguous */ \
return false; \
} \
\
bool \
TensorUtils<TENSOR_TYPE>::canUse32BitIndexMath(THCState* state, \
TENSOR_TYPE* t, \
ptrdiff_t max_elem) { \
ptrdiff_t elements = TensorUtils<TENSOR_TYPE>::getNumElements(state, t); \
if (elements >= max_elem) { \
return false; \
} \
\
ptrdiff_t offset = 0; \
ptrdiff_t linearId = elements - 1; \
\
for (int i = TensorUtils<TENSOR_TYPE>::getDims(state, t) - 1; i >= 0; --i) { \
ptrdiff_t curDimIndex = \
linearId % TensorUtils<TENSOR_TYPE>::getSize(state, t, i); \
ptrdiff_t curDimOffset = curDimIndex * \
TensorUtils<TENSOR_TYPE>::getStride(state, t, i); \
offset += curDimOffset; \
linearId /= TensorUtils<TENSOR_TYPE>::getSize(state, t, i); \
} \
\
if (offset >= max_elem) { \
return false; \
} \
\
return true; \
} \
\
bool \
TensorUtils<TENSOR_TYPE>::all32BitIndexable(THCState* state, \
TENSOR_TYPE** inputs, \
int numInputs) { \
for (int i = 0; i < numInputs; ++i) { \
if (!TensorUtils<TENSOR_TYPE>::canUse32BitIndexMath(state, inputs[i])) { \
return false; \
} \
} \
return true; \
}
IMPL_TENSOR_UTILS(THCudaByteTensor, uint8_t)
IMPL_TENSOR_UTILS(THCudaCharTensor, int8_t)
IMPL_TENSOR_UTILS(THCudaShortTensor, int16_t)
IMPL_TENSOR_UTILS(THCudaIntTensor, int32_t)
IMPL_TENSOR_UTILS(THCudaLongTensor, int64_t)
IMPL_TENSOR_UTILS(THCudaTensor, float)
IMPL_TENSOR_UTILS(THCudaDoubleTensor, double)
#ifdef CUDA_HALF_TENSOR
IMPL_TENSOR_UTILS(THCudaHalfTensor, half);
#endif
#undef IMPL_TENSOR_UTILS
| 025fdbf1fa848e4883d4655bf4b66a7b4e6f3c98.cu | #include "THCTensorTypeUtils.cuh"
#include "THCTensor.h"
#include "THCTensorCopy.h"
#include "THCHalf.h"
#include <stdlib.h>
namespace {
struct SizeAndStride {
int64_t size;
int64_t stride;
};
int compareSizeAndStride(const void* a, const void* b) {
const SizeAndStride* aS = (const SizeAndStride*) a;
const SizeAndStride* bS = (const SizeAndStride*) b;
return aS->stride < bS->stride;
}
}
#define IMPL_TENSOR_UTILS(TENSOR_TYPE, DATA_TYPE) \
\
TENSOR_TYPE* \
TensorUtils<TENSOR_TYPE>::newTensor(THCState* state) { \
return TENSOR_TYPE##_new(state); \
} \
\
TENSOR_TYPE* \
TensorUtils<TENSOR_TYPE>::newContiguous(THCState* state, \
TENSOR_TYPE* t) { \
return TENSOR_TYPE##_newContiguous(state, t); \
} \
\
THLongStorage* \
TensorUtils<TENSOR_TYPE>::newSizeOf(THCState* state, \
TENSOR_TYPE* t) { \
return TENSOR_TYPE##_newSizeOf(state, t); \
} \
\
void \
TensorUtils<TENSOR_TYPE>::retain(THCState* state, \
TENSOR_TYPE* t) { \
TENSOR_TYPE##_retain(state, t); \
} \
\
void \
TensorUtils<TENSOR_TYPE>::free(THCState* state, \
TENSOR_TYPE* t) { \
TENSOR_TYPE##_free(state, t); \
} \
\
void \
TensorUtils<TENSOR_TYPE>::freeCopyTo(THCState* state, \
TENSOR_TYPE* src, \
TENSOR_TYPE* dst) { \
TENSOR_TYPE##_freeCopyTo(state, src, dst); \
} \
\
void \
TensorUtils<TENSOR_TYPE>::resize(THCState* state, \
TENSOR_TYPE* out, \
THLongStorage* sizes, \
THLongStorage* strides) { \
TENSOR_TYPE##_resize(state, out, sizes, strides); \
} \
\
void \
TensorUtils<TENSOR_TYPE>::resizeAs(THCState* state, \
TENSOR_TYPE* dst, \
TENSOR_TYPE* src) { \
TENSOR_TYPE##_resizeAs(state, dst, src); \
} \
\
void \
TensorUtils<TENSOR_TYPE>::squeeze1d(THCState *state, \
TENSOR_TYPE *dst, \
TENSOR_TYPE *src, \
int dimension) { \
TENSOR_TYPE##_squeeze1d(state, dst, src, dimension); \
} \
\
void \
TensorUtils<TENSOR_TYPE>::unsqueeze1d(THCState *state, \
TENSOR_TYPE *dst, \
TENSOR_TYPE *src, \
int dimension) { \
TENSOR_TYPE##_unsqueeze1d(state, dst, src, dimension); \
} \
\
DATA_TYPE* \
TensorUtils<TENSOR_TYPE>::getData(THCState* state, \
TENSOR_TYPE* t) { \
/* FIXME: no cast is required except for THCudaHalfTensor */ \
return (DATA_TYPE*) TENSOR_TYPE##_data(state, t); \
} \
\
ptrdiff_t \
TensorUtils<TENSOR_TYPE>::getNumElements(THCState* state, \
TENSOR_TYPE* t) { \
return TENSOR_TYPE##_nElement(state, t); \
} \
\
int64_t \
TensorUtils<TENSOR_TYPE>::getSize(THCState* state, \
TENSOR_TYPE* t, \
int dim) { \
return TENSOR_TYPE##_size(state, t, dim); \
} \
\
int64_t \
TensorUtils<TENSOR_TYPE>::getStride(THCState* state, \
TENSOR_TYPE* t, \
int dim) { \
return TENSOR_TYPE##_stride(state, t, dim); \
} \
\
int \
TensorUtils<TENSOR_TYPE>::getDims(THCState* state, \
TENSOR_TYPE* t) { \
return TENSOR_TYPE##_nDimension(state, t); \
} \
\
bool \
TensorUtils<TENSOR_TYPE>::isContiguous(THCState* state, \
TENSOR_TYPE* t) { \
return TENSOR_TYPE##_isContiguous(state, t); \
} \
\
bool \
TensorUtils<TENSOR_TYPE>::allContiguous(THCState* state, \
TENSOR_TYPE** inputs, \
int numInputs) { \
THAssert(numInputs > 0); \
for (int i = 0; i < numInputs; ++i) { \
if (!TensorUtils<TENSOR_TYPE>::isContiguous(state, inputs[i])) { \
return false; \
} \
} \
return true; \
} \
\
/* Due to the resize semantics of ops with `out=` keywords, if */ \
/* the output `tensor` has the same shape as the output of the */ \
/* reduction operation, then any noncontiguities in the output */ \
/* `tensor` should be preserved. This needs to be special cased b/c */ \
/* otherwise, when keepdim=False, the implementations of reduction */ \
/* ops resize `tensor` to the reduced size with keepdim=True, and */ \
/* then later squeeze `tensor` to the correct output size, breaking */ \
/* the contiguity guarantees of the resize semantics. */ \
void \
TensorUtils<TENSOR_TYPE>::preserveReduceDimSemantics( \
THCState *state, TENSOR_TYPE *tensor, \
int in_dims, int64_t dimension, int keepdim) {\
int out_dims = TensorUtils<TENSOR_TYPE>::getDims(state, tensor); \
if (out_dims > 0 && !keepdim && out_dims == in_dims - 1) { \
TensorUtils<TENSOR_TYPE>::unsqueeze1d(state, tensor, tensor, dimension);\
} \
} \
\
int \
TensorUtils<TENSOR_TYPE>::getDevice(THCState* state, \
TENSOR_TYPE* t) { \
return TENSOR_TYPE##_getDevice(state, t); \
} \
\
bool \
TensorUtils<TENSOR_TYPE>::allSameDevice(THCState* state, \
TENSOR_TYPE** inputs, \
int numInputs) { \
THAssert(numInputs > 0); \
int device = TensorUtils<TENSOR_TYPE>::getDevice(state, inputs[0]); \
for (int i = 1; i < numInputs; ++i) { \
if (TensorUtils<TENSOR_TYPE>::getDevice(state, inputs[i]) != device) { \
return false; \
} \
} \
return true; \
} \
\
void \
TensorUtils<TENSOR_TYPE>::copyIgnoringOverlaps(THCState* state, \
TENSOR_TYPE* dst, \
TENSOR_TYPE* src) { \
return TENSOR_TYPE##_copyIgnoringOverlaps(state, dst, src); \
} \
\
bool \
TensorUtils<TENSOR_TYPE>::overlappingIndices(THCState* state, \
TENSOR_TYPE* t) { \
/* In this function, we don't care about permutations of the */ \
/* size/stride arrays (transpositions). */ \
/* We order the size/stride arrays by stride, skipping dimensions */ \
/* of size 1. Strides of dimensions of size 1 don't matter, since */ \
/* there is only one addressing point in them. */ \
/* In this reordered view, the tensor is contiguous if */ \
/* stride[dim] == size[dim + 1] * stride[dim + 1] for all `dim`. */ \
/* The tensor has holes if */ \
/* stride[dim] > size[dim + 1] * stride[dim + 1] for one or more */ \
/* `dim`. */ \
/* The tensor has overlaps if */ \
/* stride[dim] < size[dim + 1] * stride[dim + 1] for one or more */ \
/* `dim`, or the innermost stride is 0. */ \
\
/* Extract size/stride arrays; only consider size >1 dims. */ \
SizeAndStride info[MAX_CUTORCH_DIMS]; \
\
int dims = TensorUtils<TENSOR_TYPE>::getDims(state, t); \
int nonSize1Dims = 0; \
for (int i = 0; i < dims; ++i) { \
int64_t size = TensorUtils<TENSOR_TYPE>::getSize(state, t, i); \
if (size > 1) { \
info[nonSize1Dims].size = size; \
info[nonSize1Dims].stride = \
TensorUtils<TENSOR_TYPE>::getStride(state, t, i); \
++nonSize1Dims; \
} \
} \
\
if (nonSize1Dims == 0) { \
/* no overlap */ \
return false; \
} \
\
/* Ascending order (innermost dimension in sorted view is at [0]) */ \
qsort(info, nonSize1Dims, sizeof(SizeAndStride), compareSizeAndStride); \
\
/* Base case: innermost dimension must have stride >= 1 */ \
if (info[nonSize1Dims - 1].stride < 1) { \
return true; \
} \
\
/* Subsequent dimensions, if any */ \
for (int i = nonSize1Dims - 2; i >= 0; --i) { \
if (info[i].stride < info[i + 1].size * info[i + 1].stride) { \
/* There are overlaps */ \
return true; \
} \
} \
\
/* Tensor has holes or is contiguous */ \
return false; \
} \
\
bool \
TensorUtils<TENSOR_TYPE>::canUse32BitIndexMath(THCState* state, \
TENSOR_TYPE* t, \
ptrdiff_t max_elem) { \
ptrdiff_t elements = TensorUtils<TENSOR_TYPE>::getNumElements(state, t); \
if (elements >= max_elem) { \
return false; \
} \
\
ptrdiff_t offset = 0; \
ptrdiff_t linearId = elements - 1; \
\
for (int i = TensorUtils<TENSOR_TYPE>::getDims(state, t) - 1; i >= 0; --i) { \
ptrdiff_t curDimIndex = \
linearId % TensorUtils<TENSOR_TYPE>::getSize(state, t, i); \
ptrdiff_t curDimOffset = curDimIndex * \
TensorUtils<TENSOR_TYPE>::getStride(state, t, i); \
offset += curDimOffset; \
linearId /= TensorUtils<TENSOR_TYPE>::getSize(state, t, i); \
} \
\
if (offset >= max_elem) { \
return false; \
} \
\
return true; \
} \
\
bool \
TensorUtils<TENSOR_TYPE>::all32BitIndexable(THCState* state, \
TENSOR_TYPE** inputs, \
int numInputs) { \
for (int i = 0; i < numInputs; ++i) { \
if (!TensorUtils<TENSOR_TYPE>::canUse32BitIndexMath(state, inputs[i])) { \
return false; \
} \
} \
return true; \
}
IMPL_TENSOR_UTILS(THCudaByteTensor, uint8_t)
IMPL_TENSOR_UTILS(THCudaCharTensor, int8_t)
IMPL_TENSOR_UTILS(THCudaShortTensor, int16_t)
IMPL_TENSOR_UTILS(THCudaIntTensor, int32_t)
IMPL_TENSOR_UTILS(THCudaLongTensor, int64_t)
IMPL_TENSOR_UTILS(THCudaTensor, float)
IMPL_TENSOR_UTILS(THCudaDoubleTensor, double)
#ifdef CUDA_HALF_TENSOR
IMPL_TENSOR_UTILS(THCudaHalfTensor, half);
#endif
#undef IMPL_TENSOR_UTILS
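/* Editor's illustration, not part of the original file: canUse32BitIndexMath above bounds
   the largest reachable offset by walking the last linear index through each dimension.
   The same arithmetic as a standalone helper (name is made up): */
static ptrdiff_t thc_max_linear_offset(const int64_t* sizes, const int64_t* strides,
                                       int ndim, ptrdiff_t elements)
{
    ptrdiff_t offset = 0;
    ptrdiff_t linearId = elements - 1;            /* last element in linear order */
    for (int i = ndim - 1; i >= 0; --i) {
        offset += (linearId % sizes[i]) * strides[i];
        linearId /= sizes[i];
    }
    return offset;                                /* compared against max_elem above */
}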
|
2fb22e0e623f1acd7421fc6aa9a2484860984d4f.hip | // !!! This is a file automatically generated by hipify!!!
#include "gpu_plasma.h"
#include <stdlib.h>
#include "mpi_shortcut.h"
int prepare_code_execution()
{
GPUPlasma<GPUCell> *plasma;
size_t sizeP;
printf("oarticle size %d %d \n",sizeof(Particle),sizeof(Particle)/sizeof(double));
hipDeviceGetLimit(&sizeP,hipLimitPrintfFifoSize);
printf("printf default limit %d \n",sizeP/1024/1024);
sizeP *= 10000;
hipDeviceSetLimit(hipLimitPrintfFifoSize, sizeP);
hipDeviceGetLimit(&sizeP,hipLimitPrintfFifoSize);
printf("printf limit set to %d \n",sizeP/1024/1024);
int err = hipSetDevice(0);
printf("err %d \n",err);
plasma = new GPUPlasma<GPUCell>(100,4,4,1.1424,0.05,0.05,1.0,2000,1.0,0.001);
    plasma->Initialize();
    return 0;
}
| 2fb22e0e623f1acd7421fc6aa9a2484860984d4f.cu | #include "gpu_plasma.h"
#include <stdlib.h>
#include "mpi_shortcut.h"
int prepare_code_execution()
{
GPUPlasma<GPUCell> *plasma;
size_t sizeP;
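    // Query the device-side printf FIFO size and enlarge it below so that
    // printf output from kernels is not truncated.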
    printf("particle size %zu %zu \n",sizeof(Particle),sizeof(Particle)/sizeof(double));
    cudaDeviceGetLimit(&sizeP,cudaLimitPrintfFifoSize);
    printf("printf default limit %zu MB \n",sizeP/1024/1024);
    sizeP *= 10000;
    cudaDeviceSetLimit(cudaLimitPrintfFifoSize, sizeP);
    cudaDeviceGetLimit(&sizeP,cudaLimitPrintfFifoSize);
    printf("printf limit set to %zu MB \n",sizeP/1024/1024);
int err = cudaSetDevice(0);
printf("err %d \n",err);
plasma = new GPUPlasma<GPUCell>(100,4,4,1.1424,0.05,0.05,1.0,2000,1.0,0.001);
    plasma->Initialize();
    return 0;
}
|
5e00a87701600ad8548c6d023746b90cc8c098df.hip | // !!! This is a file automatically generated by hipify!!!
#include "cuda_renderer.h"
#include <hip/hip_runtime_api.h>
#include <stdio.h>
template <typename scalar_t>
__global__ void forward_flow_shift_renderer_cuda_kernel(const scalar_t* image_src,
const scalar_t* flow_src_to_tar,
const int batch_size,
const int nChannelsTexture,
const int imageH,
const int imageW,
const int kernel_radius,
const float kernel_sigma2,
scalar_t* image_tar_w_I,
scalar_t* image_tar_w,
scalar_t* mask)
{
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= batch_size * imageH * imageW) {
return;
}
const int bn = i / (imageH * imageW);
float flow_x = flow_src_to_tar[2*i];
float flow_y = flow_src_to_tar[2*i+1];
int tl_x=flow_x-kernel_radius+0.5;
int tl_y=flow_y-kernel_radius+0.5;
int br_x=flow_x+kernel_radius+0.5;
int br_y=flow_y+kernel_radius+0.5;
tl_x=(tl_x>=0?tl_x:0);
tl_y=(tl_y>=0?tl_y:0);
br_x=(br_x<imageW?br_x:imageW-1);
br_y=(br_y<imageH?br_y:imageH-1);
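    // Gaussian forward splat: scatter this source pixel's color into the
    // clamped window around its flow target, accumulating w*I and w separately
    // so the target image can be normalized as sum(w*I)/sum(w) (the form the
    // backward kernel below differentiates).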
for(int y=tl_y; y<br_y+1; y++)
{
for(int x=tl_x; x<br_x+1; x++)
{
if(x<0) continue;
if(x>imageW-1) continue;
if(y<0) continue;
if(y>imageH-1) continue;
float dx = x - flow_x;
float dy = y - flow_y;
float d2 = dx*dx+dy*dy;
float w = expf(-0.5 * d2 / kernel_sigma2);
int index = bn * (imageH * imageW) + y * imageW + x;
for (int j=0; j< nChannelsTexture; j++)
{
atomicAdd(image_tar_w_I+index*nChannelsTexture + j, w * image_src[i*nChannelsTexture+j]);
}
atomicAdd(image_tar_w+index, w);
mask[index]=1.0;
}
}
}
template <typename scalar_t>
__global__ void backward_flow_shift_renderer_cuda_kernel(const scalar_t* image_src,
const scalar_t* flow_src_to_tar,
const int batch_size,
const int nChannelsTexture,
const int imageH,
const int imageW,
const int kernel_radius,
const float kernel_sigma2,
const scalar_t* image_tar_w_I,
const scalar_t* image_tar_w,
const scalar_t* grad_image_target,
scalar_t* grad_image_src,
scalar_t* grad_flow)
{
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= batch_size * imageH * imageW) {
return;
}
const int bn = i / (imageH * imageW);
float flow_x = flow_src_to_tar[2*i];
float flow_y = flow_src_to_tar[2*i+1];
int tl_x=flow_x-kernel_radius+0.5;
int tl_y=flow_y-kernel_radius+0.5;
int br_x=flow_x+kernel_radius+0.5;
int br_y=flow_y+kernel_radius+0.5;
tl_x=(tl_x>=0?tl_x:0);
tl_y=(tl_y>=0?tl_y:0);
br_x=(br_x<imageW?br_x:imageW-1);
br_y=(br_y<imageH?br_y:imageH-1);
for(int y=tl_y; y<br_y+1; y++)
{
for(int x=tl_x; x<br_x+1; x++)
{
if(x<0) continue;
if(x>imageW-1) continue;
if(y<0) continue;
if(y>imageH-1) continue;
float dx = x - flow_x;
float dy = y - flow_y;
float d2 = dx*dx+dy*dy;
float w = expf(-0.5 * d2 / kernel_sigma2);
int index = bn * (imageH * imageW) + y * imageW + x;
float It_w = image_tar_w[index];
float Dw_DflowX = w*(-0.5)/kernel_sigma2*(-2.0)*dx;
float Dw_DflowY = w*(-0.5)/kernel_sigma2*(-2.0)*dy;
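        // Dw_Dflow* above is the derivative of w = expf(-0.5*d2/sigma2) with
        // respect to the flow (dw/dflow_x = w*dx/sigma2, likewise for y); it is
        // combined below with the quotient-rule derivative of the normalized
        // target pixel sum(w*I)/sum(w) with respect to this w.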
for(int j=0; j < nChannelsTexture; j++)
{
float De_DItj = grad_image_target[index*nChannelsTexture+j];
float Is_j = image_src[i*nChannelsTexture + j];
float It_w_Ij = image_tar_w_I[index*nChannelsTexture+j];
float DItj_Dw = (Is_j*It_w - It_w_Ij)/(It_w*It_w+1e-8);
float De_DflowX = De_DItj * DItj_Dw * Dw_DflowX;
float De_DflowY = De_DItj * DItj_Dw * Dw_DflowY;
// accumulate
grad_flow[i*2]+=De_DflowX;
grad_flow[i*2+1]+=De_DflowY;
grad_image_src[i*nChannelsTexture+j] += De_DItj * w / (It_w+1e-8);
}
}
}
}
void forward_flow_shift_renderer(const float* image_src,
const float* flow_src_to_tar,
const int batch_size,
const int nChannelsTexture,
const int imageH,
const int imageW,
const int kernel_radius,
const float kernel_sigma2,
float* image_tar_w_I,
float* image_tar_w,
float* mask)
{
const int threads = 512;
const dim3 blocks ((batch_size * imageH * imageW - 1) / threads + 1);
hipLaunchKernelGGL(( forward_flow_shift_renderer_cuda_kernel<float>), dim3(blocks), dim3(threads), 0, 0, image_src,
flow_src_to_tar,
batch_size,
nChannelsTexture,
imageH,
imageW,
kernel_radius,
kernel_sigma2,
image_tar_w_I,
image_tar_w,
mask);
hipDeviceSynchronize();
}
void backward_flow_shift_renderer(const float* image_src,
const float* flow_src_to_tar,
const int batch_size,
const int nChannelsTexture,
const int imageH,
const int imageW,
const int kernel_radius,
const float kernel_sigma2,
const float* image_tar_w_I,
const float* image_tar_w,
const float* grad_image_target,
float* grad_image_src,
float* grad_flow)
{
const int threads = 512;
const dim3 blocks ((batch_size * imageH * imageW - 1) / threads + 1);
hipLaunchKernelGGL(( backward_flow_shift_renderer_cuda_kernel<float>), dim3(blocks), dim3(threads), 0, 0, image_src,
flow_src_to_tar,
batch_size,
nChannelsTexture,
imageH,
imageW,
kernel_radius,
kernel_sigma2,
image_tar_w_I,
image_tar_w,
grad_image_target,
grad_image_src,
grad_flow);
hipDeviceSynchronize();
}
| 5e00a87701600ad8548c6d023746b90cc8c098df.cu | #include "cuda_renderer.h"
#include <cuda_runtime_api.h>
#include <stdio.h>
template <typename scalar_t>
__global__ void forward_flow_shift_renderer_cuda_kernel(const scalar_t* image_src,
const scalar_t* flow_src_to_tar,
const int batch_size,
const int nChannelsTexture,
const int imageH,
const int imageW,
const int kernel_radius,
const float kernel_sigma2,
scalar_t* image_tar_w_I,
scalar_t* image_tar_w,
scalar_t* mask)
{
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= batch_size * imageH * imageW) {
return;
}
const int bn = i / (imageH * imageW);
float flow_x = flow_src_to_tar[2*i];
float flow_y = flow_src_to_tar[2*i+1];
int tl_x=flow_x-kernel_radius+0.5;
int tl_y=flow_y-kernel_radius+0.5;
int br_x=flow_x+kernel_radius+0.5;
int br_y=flow_y+kernel_radius+0.5;
tl_x=(tl_x>=0?tl_x:0);
tl_y=(tl_y>=0?tl_y:0);
br_x=(br_x<imageW?br_x:imageW-1);
br_y=(br_y<imageH?br_y:imageH-1);
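    // Gaussian forward splat: scatter this source pixel's color into the
    // clamped window around its flow target, accumulating w*I and w separately
    // so the target image can be normalized as sum(w*I)/sum(w) (the form the
    // backward kernel below differentiates).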
for(int y=tl_y; y<br_y+1; y++)
{
for(int x=tl_x; x<br_x+1; x++)
{
if(x<0) continue;
if(x>imageW-1) continue;
if(y<0) continue;
if(y>imageH-1) continue;
float dx = x - flow_x;
float dy = y - flow_y;
float d2 = dx*dx+dy*dy;
float w = expf(-0.5 * d2 / kernel_sigma2);
int index = bn * (imageH * imageW) + y * imageW + x;
for (int j=0; j< nChannelsTexture; j++)
{
atomicAdd(image_tar_w_I+index*nChannelsTexture + j, w * image_src[i*nChannelsTexture+j]);
}
atomicAdd(image_tar_w+index, w);
mask[index]=1.0;
}
}
}
template <typename scalar_t>
__global__ void backward_flow_shift_renderer_cuda_kernel(const scalar_t* image_src,
const scalar_t* flow_src_to_tar,
const int batch_size,
const int nChannelsTexture,
const int imageH,
const int imageW,
const int kernel_radius,
const float kernel_sigma2,
const scalar_t* image_tar_w_I,
const scalar_t* image_tar_w,
const scalar_t* grad_image_target,
scalar_t* grad_image_src,
scalar_t* grad_flow)
{
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= batch_size * imageH * imageW) {
return;
}
const int bn = i / (imageH * imageW);
float flow_x = flow_src_to_tar[2*i];
float flow_y = flow_src_to_tar[2*i+1];
int tl_x=flow_x-kernel_radius+0.5;
int tl_y=flow_y-kernel_radius+0.5;
int br_x=flow_x+kernel_radius+0.5;
int br_y=flow_y+kernel_radius+0.5;
tl_x=(tl_x>=0?tl_x:0);
tl_y=(tl_y>=0?tl_y:0);
br_x=(br_x<imageW?br_x:imageW-1);
br_y=(br_y<imageH?br_y:imageH-1);
for(int y=tl_y; y<br_y+1; y++)
{
for(int x=tl_x; x<br_x+1; x++)
{
if(x<0) continue;
if(x>imageW-1) continue;
if(y<0) continue;
if(y>imageH-1) continue;
float dx = x - flow_x;
float dy = y - flow_y;
float d2 = dx*dx+dy*dy;
float w = expf(-0.5 * d2 / kernel_sigma2);
int index = bn * (imageH * imageW) + y * imageW + x;
float It_w = image_tar_w[index];
float Dw_DflowX = w*(-0.5)/kernel_sigma2*(-2.0)*dx;
float Dw_DflowY = w*(-0.5)/kernel_sigma2*(-2.0)*dy;
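        // Dw_Dflow* above is the derivative of w = expf(-0.5*d2/sigma2) with
        // respect to the flow (dw/dflow_x = w*dx/sigma2, likewise for y); it is
        // combined below with the quotient-rule derivative of the normalized
        // target pixel sum(w*I)/sum(w) with respect to this w.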
for(int j=0; j < nChannelsTexture; j++)
{
float De_DItj = grad_image_target[index*nChannelsTexture+j];
float Is_j = image_src[i*nChannelsTexture + j];
float It_w_Ij = image_tar_w_I[index*nChannelsTexture+j];
float DItj_Dw = (Is_j*It_w - It_w_Ij)/(It_w*It_w+1e-8);
float De_DflowX = De_DItj * DItj_Dw * Dw_DflowX;
float De_DflowY = De_DItj * DItj_Dw * Dw_DflowY;
// accumulate
grad_flow[i*2]+=De_DflowX;
grad_flow[i*2+1]+=De_DflowY;
grad_image_src[i*nChannelsTexture+j] += De_DItj * w / (It_w+1e-8);
}
}
}
}
void forward_flow_shift_renderer(const float* image_src,
const float* flow_src_to_tar,
const int batch_size,
const int nChannelsTexture,
const int imageH,
const int imageW,
const int kernel_radius,
const float kernel_sigma2,
float* image_tar_w_I,
float* image_tar_w,
float* mask)
{
const int threads = 512;
const dim3 blocks ((batch_size * imageH * imageW - 1) / threads + 1);
forward_flow_shift_renderer_cuda_kernel<float><<<blocks, threads>>>(image_src,
flow_src_to_tar,
batch_size,
nChannelsTexture,
imageH,
imageW,
kernel_radius,
kernel_sigma2,
image_tar_w_I,
image_tar_w,
mask);
cudaDeviceSynchronize();
}
void backward_flow_shift_renderer(const float* image_src,
const float* flow_src_to_tar,
const int batch_size,
const int nChannelsTexture,
const int imageH,
const int imageW,
const int kernel_radius,
const float kernel_sigma2,
const float* image_tar_w_I,
const float* image_tar_w,
const float* grad_image_target,
float* grad_image_src,
float* grad_flow)
{
const int threads = 512;
const dim3 blocks ((batch_size * imageH * imageW - 1) / threads + 1);
backward_flow_shift_renderer_cuda_kernel<float><<<blocks, threads>>>(image_src,
flow_src_to_tar,
batch_size,
nChannelsTexture,
imageH,
imageW,
kernel_radius,
kernel_sigma2,
image_tar_w_I,
image_tar_w,
grad_image_target,
grad_image_src,
grad_flow);
cudaDeviceSynchronize();
}
|
238981e4bec212149a811c61d08a674184d6696e.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <cuda_gl_interop.h>
#include "helper_cuda.h"
typedef unsigned int uint;
extern "C"
{
void allocateArray(void **devPtr, size_t size)
{
checkCudaErrors(hipMalloc(devPtr, size));
}
void freeArray(void *devPtr)
{
checkCudaErrors(hipFree(devPtr));
}
void cudaInit()
{
// use device with highest Gflops/s
int devID = findCudaDevice();
if (devID < 0)
{
printf("No CUDA Capable devices found, exiting...\n");
exit(EXIT_SUCCESS);
}
}
void copyArrayToDevice(void *device, const void *host, int offset, int size)
{
checkCudaErrors(hipMemcpy((char *) device + offset, host, size, hipMemcpyHostToDevice));
}
void registerGLBufferObject(unsigned int vbo, struct cudaGraphicsResource **cuda_vbo_resource)
{
checkCudaErrors(hipGraphicsGLRegisterBuffer(cuda_vbo_resource, vbo,
hipGraphicsMapFlagsNone));
}
void unregisterGLBufferObject(struct cudaGraphicsResource *cuda_vbo_resource)
{
checkCudaErrors(hipGraphicsUnregisterResource(cuda_vbo_resource));
}
void *mapGLBufferObject(struct cudaGraphicsResource **cuda_vbo_resource)
{
void *ptr;
checkCudaErrors(hipGraphicsMapResources(1, cuda_vbo_resource, 0));
size_t num_bytes;
checkCudaErrors(hipGraphicsResourceGetMappedPointer((void **)&ptr, &num_bytes, *cuda_vbo_resource));
return ptr;
}
void unmapGLBufferObject(struct cudaGraphicsResource *cuda_vbo_resource)
{
checkCudaErrors(hipGraphicsUnmapResources(1, &cuda_vbo_resource, 0));
}
void copyArrayFromDevice(void *host, const void *device, int size)
{
checkCudaErrors(hipMemcpy(host, device, size, hipMemcpyDeviceToHost));
}
//Round a / b to nearest higher integer value
uint iDivUp(uint a, uint b)
{
return (a % b != 0) ? (a / b + 1) : (a / b);
}
// compute grid and thread block size for a given number of elements
void computeGridSize(uint n, uint blockSize, uint &numBlocks, uint &numThreads)
{
numThreads = min(blockSize, n);
numBlocks = iDivUp(n, numThreads);
}
}
| 238981e4bec212149a811c61d08a674184d6696e.cu |
#include <cuda_runtime.h>
#include <cuda_gl_interop.h>
#include "helper_cuda.h"
typedef unsigned int uint;
extern "C"
{
void allocateArray(void **devPtr, size_t size)
{
checkCudaErrors(cudaMalloc(devPtr, size));
}
void freeArray(void *devPtr)
{
checkCudaErrors(cudaFree(devPtr));
}
void cudaInit()
{
// use device with highest Gflops/s
int devID = findCudaDevice();
if (devID < 0)
{
printf("No CUDA Capable devices found, exiting...\n");
exit(EXIT_SUCCESS);
}
}
void copyArrayToDevice(void *device, const void *host, int offset, int size)
{
checkCudaErrors(cudaMemcpy((char *) device + offset, host, size, cudaMemcpyHostToDevice));
}
void registerGLBufferObject(unsigned int vbo, struct cudaGraphicsResource **cuda_vbo_resource)
{
checkCudaErrors(cudaGraphicsGLRegisterBuffer(cuda_vbo_resource, vbo,
cudaGraphicsMapFlagsNone));
}
void unregisterGLBufferObject(struct cudaGraphicsResource *cuda_vbo_resource)
{
checkCudaErrors(cudaGraphicsUnregisterResource(cuda_vbo_resource));
}
void *mapGLBufferObject(struct cudaGraphicsResource **cuda_vbo_resource)
{
void *ptr;
checkCudaErrors(cudaGraphicsMapResources(1, cuda_vbo_resource, 0));
size_t num_bytes;
checkCudaErrors(cudaGraphicsResourceGetMappedPointer((void **)&ptr, &num_bytes, *cuda_vbo_resource));
return ptr;
}
void unmapGLBufferObject(struct cudaGraphicsResource *cuda_vbo_resource)
{
checkCudaErrors(cudaGraphicsUnmapResources(1, &cuda_vbo_resource, 0));
}
void copyArrayFromDevice(void *host, const void *device, int size)
{
checkCudaErrors(cudaMemcpy(host, device, size, cudaMemcpyDeviceToHost));
}
//Round a / b to nearest higher integer value
uint iDivUp(uint a, uint b)
{
return (a % b != 0) ? (a / b + 1) : (a / b);
}
// compute grid and thread block size for a given number of elements
void computeGridSize(uint n, uint blockSize, uint &numBlocks, uint &numThreads)
{
numThreads = min(blockSize, n);
numBlocks = iDivUp(n, numThreads);
}
}
|
c87c19f9b9e5bac1720bc77f58c16f248d62e8f3.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <timer.h>
#define BLOCK 16
#define WIDTH 1024
float h_A[WIDTH * WIDTH];
float h_B[WIDTH * WIDTH];
float h_C[WIDTH * WIDTH];
float *d_A, *d_B, *d_C;
void h_multiply(float *A, float *B, float *C);
__global__ void d_multiply0(float *A, float *B, float *C);
__global__ void d_multiply1(float *A, float *B, float *C);
int main() {
unsigned int i;
hipMalloc((void**)&d_A, sizeof(float) * WIDTH * WIDTH);
hipMalloc((void**)&d_B, sizeof(float) * WIDTH * WIDTH);
hipMalloc((void**)&d_C, sizeof(float) * WIDTH * WIDTH);
for (i = 0; i < (WIDTH * WIDTH); i++) {
h_A[i] = (float)i;
h_B[i] = (float)i;
}
StartTimer();
    hipMemcpy(d_A, h_A, sizeof(float) * WIDTH * WIDTH, hipMemcpyHostToDevice);
    hipMemcpy(d_B, h_B, sizeof(float) * WIDTH * WIDTH, hipMemcpyHostToDevice);
    dim3 grid(WIDTH/BLOCK, WIDTH/BLOCK);
    dim3 block(BLOCK, BLOCK);
    hipLaunchKernelGGL(( d_multiply0) , dim3(grid),dim3(block) , 0, 0, d_A,d_B,d_C);
    hipMemcpy(h_C, d_C, sizeof(float) * WIDTH * WIDTH, hipMemcpyDeviceToHost);
    printf("device computation time: %f(ms)\n", GetTimer());
    printf("device computation result: %f\n", h_C[WIDTH*WIDTH-1]);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
//
StartTimer();
h_multiply(h_A, h_B, h_C);
    printf("host computation time: %f(ms)\n", GetTimer());
    printf("host computation result: %f\n", h_C[WIDTH * WIDTH - 1]);
}
void h_multiply(float *A, float *B, float *C) {
unsigned int r, c, i;
float tmp;
for (int r = 0; r < WIDTH; r++) {
for (int c = 0; c < WIDTH; c++) {
tmp = 0.0;
for (int i = 0; i < WIDTH; i++) {
tmp += A[WIDTH * r + i] * B[WIDTH * i + c];
}
C[WIDTH * r + c] = tmp;
}
}
}
__global__ void d_multiply0(float *A, float *B, float *C){
unsigned int r= blockDim.y * blockIdx.y + threadIdx.y;
unsigned int c= blockDim.x * blockIdx.x + threadIdx.x;
unsigned int i;
    float tmp;
tmp =0.0f;
for(i=0; i <WIDTH; i++){
tmp +=A[WIDTH * r + i] * B[WIDTH * i + c];
}
C[WIDTH * r + c] = tmp;
}
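// Tiled variant: each thread block stages BLOCK x BLOCK sub-tiles of A and B
// in shared memory and accumulates the partial dot products tile by tile.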
__global__ void d_multiply1(float *A, float *B, float *C){
unsigned int r= blockDim.y * blockIdx.y + threadIdx.y;
unsigned int c= blockDim.x * blockIdx.x + threadIdx.x;
unsigned int i,j;
float tmp;
__shared__ float s_A[BLOCK][BLOCK];
__shared__ float s_B[BLOCK][BLOCK];
tmp = 0.0f;
    for (i = 0; i < WIDTH; i += BLOCK){
        s_A[threadIdx.y][threadIdx.x] = A[WIDTH * r + i + threadIdx.x];
        s_B[threadIdx.y][threadIdx.x] = B[WIDTH * (i + threadIdx.y) + c ];
        __syncthreads();
        for(j=0; j < BLOCK; j++){
            tmp += s_A[threadIdx.y][j] * s_B[j][threadIdx.x];
        }
        __syncthreads();
    }
    C[WIDTH * r + c] = tmp;
} | c87c19f9b9e5bac1720bc77f58c16f248d62e8f3.cu | #include <cstdio>
#include <stdio.h>
#include <cuda_runtime.h>
#include <timer.h>
#define BLOCK 16
#define WIDTH 1024
float h_A[WIDTH * WIDTH];
float h_B[WIDTH * WIDTH];
float h_C[WIDTH * WIDTH];
float *d_A, *d_B, *d_C;
void h_multiply(float *A, float *B, float *C);
__global__ void d_multiply0(float *A, float *B, float *C);
__global__ void d_multiply1(float *A, float *B, float *C);
int main() {
unsigned int i;
cudaMalloc((void**)&d_A, sizeof(float) * WIDTH * WIDTH);
cudaMalloc((void**)&d_B, sizeof(float) * WIDTH * WIDTH);
cudaMalloc((void**)&d_C, sizeof(float) * WIDTH * WIDTH);
for (i = 0; i < (WIDTH * WIDTH); i++) {
h_A[i] = (float)i;
h_B[i] = (float)i;
}
StartTimer();
    cudaMemcpy(d_A, h_A, sizeof(float) * WIDTH * WIDTH, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, sizeof(float) * WIDTH * WIDTH, cudaMemcpyHostToDevice);
    dim3 grid(WIDTH/BLOCK, WIDTH/BLOCK);
    dim3 block(BLOCK, BLOCK);
    d_multiply0 <<<grid,block >>>(d_A,d_B,d_C);
    cudaMemcpy(h_C, d_C, sizeof(float) * WIDTH * WIDTH, cudaMemcpyDeviceToHost);
    printf("device computation time: %f(ms)\n", GetTimer());
    printf("device computation result: %f\n", h_C[WIDTH*WIDTH-1]);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
//
StartTimer();
h_multiply(h_A, h_B, h_C);
    printf("host computation time: %f(ms)\n", GetTimer());
    printf("host computation result: %f\n", h_C[WIDTH * WIDTH - 1]);
}
void h_multiply(float *A, float *B, float *C) {
unsigned int r, c, i;
float tmp;
for (int r = 0; r < WIDTH; r++) {
for (int c = 0; c < WIDTH; c++) {
tmp = 0.0;
for (int i = 0; i < WIDTH; i++) {
tmp += A[WIDTH * r + i] * B[WIDTH * i + c];
}
C[WIDTH * r + c] = tmp;
}
}
}
__global__ void d_multiply0(float *A, float *B, float *C){
unsigned int r= blockDim.y * blockIdx.y + threadIdx.y;
unsigned int c= blockDim.x * blockIdx.x + threadIdx.x;
unsigned int i;
    float tmp;
tmp =0.0f;
for(i=0; i <WIDTH; i++){
tmp +=A[WIDTH * r + i] * B[WIDTH * i + c];
}
C[WIDTH * r + c] = tmp;
}
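// Tiled variant: each thread block stages BLOCK x BLOCK sub-tiles of A and B
// in shared memory and accumulates the partial dot products tile by tile.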
__global__ void d_multiply1(float *A, float *B, float *C){
unsigned int r= blockDim.y * blockIdx.y + threadIdx.y;
unsigned int c= blockDim.x * blockIdx.x + threadIdx.x;
unsigned int i,j;
float tmp;
__shared__ float s_A[BLOCK][BLOCK];
__shared__ float s_B[BLOCK][BLOCK];
tmp = 0.0f;
    for (i = 0; i < WIDTH; i += BLOCK){
        s_A[threadIdx.y][threadIdx.x] = A[WIDTH * r + i + threadIdx.x];
        s_B[threadIdx.y][threadIdx.x] = B[WIDTH * (i + threadIdx.y) + c ];
        __syncthreads();
        for(j=0; j < BLOCK; j++){
            tmp += s_A[threadIdx.y][j] * s_B[j][threadIdx.x];
        }
        __syncthreads();
    }
    C[WIDTH * r + c] = tmp;
} |
d50daaae3e9650b96ab01d83d9355942523ce4b8.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright 2017
#ifdef HAVE_MPI
#include "mpi.h"
#endif
#ifdef HAVE_CUDA
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#endif
#include "weight_function.h"
#include "basis/qc_basis.h"
#include "qc_monte.h"
int main(int argc, char* argv[]) {
#ifdef HAVE_MPI
MPI_Init(&argc, &argv);
#endif
MPI_info mpi_info;
if (argc != 2) {
if (mpi_info.sys_master) {
printf("Usage: mcmpN.x <input>\n");
}
exit(EXIT_FAILURE);
} else {
if (mpi_info.sys_master) {
printf("MC-GFn program developed by the Hirata lab\n");
printf("Code compiled from Git-Commit %s\n\n", VERSION);
}
}
mpi_info.print();
IOPs iops;
iops.read(mpi_info, argv[1]);
iops.print(mpi_info, argv[1]);
Molec molec;
molec.read(mpi_info, iops.sopns[KEYS::GEOM]);
Basis basis(iops, mpi_info, molec);
GTO_Weight mc_basis;
mc_basis.read(mpi_info, molec, iops.sopns[KEYS::MC_BASIS]);
#ifdef HAVE_CUDA
int deviceCount;
hipError_t error_id = hipGetDeviceCount(&deviceCount);
if (error_id != hipSuccess) {
printf("hipGetDeviceCount returned %d\n-> %s\n",
static_cast<int>(error_id), hipGetErrorString(error_id));
printf("Result = FAIL\n");
exit(EXIT_FAILURE);
}
if (iops.iopns[KEYS::CPU] == 1 || deviceCount == 0) {
#endif
if (iops.iopns[KEYS::TASK] == TASKS::MP) {
if (iops.iopns[KEYS::ORDER] == 2) {
MP2 qc_monte(mpi_info, iops, molec, basis, mc_basis);
qc_monte.monte_energy();
} else if (iops.iopns[KEYS::ORDER] == 3) {
MP3 qc_monte(mpi_info, iops, molec, basis, mc_basis);
qc_monte.monte_energy();
} else if (iops.iopns[KEYS::ORDER] == 4) {
MP4 qc_monte(mpi_info, iops, molec, basis, mc_basis);
qc_monte.monte_energy();
}
} else {
if (iops.iopns[KEYS::ORDER] == 2) {
GF2 qc_monte(mpi_info, iops, molec, basis, mc_basis);
qc_monte.monte_energy();
} else if (iops.iopns[KEYS::ORDER] == 3) {
GF3 qc_monte(mpi_info, iops, molec, basis, mc_basis);
qc_monte.monte_energy();
}
}
#ifdef HAVE_CUDA
} else {
if (iops.iopns[KEYS::TASK] == TASKS::MP) {
if (iops.iopns[KEYS::ORDER] == 2) {
MP2 qc_monte(mpi_info, iops, molec, basis, mc_basis);
qc_monte.monte_energy();
} else if (iops.iopns[KEYS::ORDER] == 3) {
MP3 qc_monte(mpi_info, iops, molec, basis, mc_basis);
qc_monte.monte_energy();
}
} else {
if (iops.iopns[KEYS::ORDER] == 2) {
// GPU_GF2 qc_monte(mpi_info, iops, molec, basis, mc_basis);
// qc_monte.monte_energy();
} else if (iops.iopns[KEYS::ORDER] == 3) {
// GPU_GF3 qc_monte(mpi_info, iops, molec, basis, mc_basis);
// qc_monte.monte_energy();
}
}
}
#endif
#ifdef HAVE_MPI
MPI_Finalize();
#endif
}
| d50daaae3e9650b96ab01d83d9355942523ce4b8.cu | // Copyright 2017
#ifdef HAVE_MPI
#include "mpi.h"
#endif
#ifdef HAVE_CUDA
#include <cuda.h>
#include <cuda_runtime.h>
#endif
#include "weight_function.h"
#include "basis/qc_basis.h"
#include "qc_monte.h"
int main(int argc, char* argv[]) {
#ifdef HAVE_MPI
MPI_Init(&argc, &argv);
#endif
MPI_info mpi_info;
if (argc != 2) {
if (mpi_info.sys_master) {
printf("Usage: mcmpN.x <input>\n");
}
exit(EXIT_FAILURE);
} else {
if (mpi_info.sys_master) {
printf("MC-GFn program developed by the Hirata lab\n");
printf("Code compiled from Git-Commit %s\n\n", VERSION);
}
}
mpi_info.print();
IOPs iops;
iops.read(mpi_info, argv[1]);
iops.print(mpi_info, argv[1]);
Molec molec;
molec.read(mpi_info, iops.sopns[KEYS::GEOM]);
Basis basis(iops, mpi_info, molec);
GTO_Weight mc_basis;
mc_basis.read(mpi_info, molec, iops.sopns[KEYS::MC_BASIS]);
#ifdef HAVE_CUDA
int deviceCount;
cudaError_t error_id = cudaGetDeviceCount(&deviceCount);
if (error_id != cudaSuccess) {
printf("cudaGetDeviceCount returned %d\n-> %s\n",
static_cast<int>(error_id), cudaGetErrorString(error_id));
printf("Result = FAIL\n");
exit(EXIT_FAILURE);
}
if (iops.iopns[KEYS::CPU] == 1 || deviceCount == 0) {
#endif
if (iops.iopns[KEYS::TASK] == TASKS::MP) {
if (iops.iopns[KEYS::ORDER] == 2) {
MP2 qc_monte(mpi_info, iops, molec, basis, mc_basis);
qc_monte.monte_energy();
} else if (iops.iopns[KEYS::ORDER] == 3) {
MP3 qc_monte(mpi_info, iops, molec, basis, mc_basis);
qc_monte.monte_energy();
} else if (iops.iopns[KEYS::ORDER] == 4) {
MP4 qc_monte(mpi_info, iops, molec, basis, mc_basis);
qc_monte.monte_energy();
}
} else {
if (iops.iopns[KEYS::ORDER] == 2) {
GF2 qc_monte(mpi_info, iops, molec, basis, mc_basis);
qc_monte.monte_energy();
} else if (iops.iopns[KEYS::ORDER] == 3) {
GF3 qc_monte(mpi_info, iops, molec, basis, mc_basis);
qc_monte.monte_energy();
}
}
#ifdef HAVE_CUDA
} else {
if (iops.iopns[KEYS::TASK] == TASKS::MP) {
if (iops.iopns[KEYS::ORDER] == 2) {
MP2 qc_monte(mpi_info, iops, molec, basis, mc_basis);
qc_monte.monte_energy();
} else if (iops.iopns[KEYS::ORDER] == 3) {
MP3 qc_monte(mpi_info, iops, molec, basis, mc_basis);
qc_monte.monte_energy();
}
} else {
if (iops.iopns[KEYS::ORDER] == 2) {
// GPU_GF2 qc_monte(mpi_info, iops, molec, basis, mc_basis);
// qc_monte.monte_energy();
} else if (iops.iopns[KEYS::ORDER] == 3) {
// GPU_GF3 qc_monte(mpi_info, iops, molec, basis, mc_basis);
// qc_monte.monte_energy();
}
}
}
#endif
#ifdef HAVE_MPI
MPI_Finalize();
#endif
}
|
7a0d007024540d842ce5ac96a6349fcf24a25122.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
#include <math.h>
// Includes
#include <stdio.h>
#include "../include/ContAcq-IntClk.h"
// includes, project
#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <hip/hip_runtime.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 15
#define F 2
#define ITERATIONS (unsigned)( 10000 )
#define ITERATIONS2 REPLACE_ITERATIONS
#define max_tid THREADS_PER_BLOCK*NUM_OF_BLOCKS
#define LINE_SIZE 128
#define SETS 64
#define ASSOC 6
#define SIMD_WIDTH 32
// Variables
int* h_A;
int* h_B;
int* h_C;
int* d_A;
int* d_B;
int* d_C;
bool noprompt = false;
unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit(int*, int);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(hipError_t err, const char *file, const int line ){
if(hipSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling hipGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ){
hipError_t err = hipGetLastError();
if (hipSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
// Device code
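// Power/stress microbenchmark kernel: each thread repeatedly stores a running
// value to strided global-memory locations. The input array A is allocated and
// copied over, but the read from it is commented out below.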
__global__ void PowerKernal(int* A, int* C, int N){
int tid = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
//int size = (LINE_SIZE*ASSOC*SETS)/sizeof(int);
//unsigned j=0, k=0;
int m_sum=N;
// m_sum = A[tid*F];
for(unsigned j=0; j<ITERATIONS2; j++){
for(unsigned k=0; k<ITERATIONS; ++k){
C[((unsigned)(tid*F)+(unsigned)(k*max_tid*F))]=m_sum;
}
m_sum+=j;
}
__syncthreads();
}
// Host code
int main(){
printf("Power Microbenchmarks\n");
//int N = LINE_SIZE*SETS*ASSOC;
unsigned N =((unsigned)(max_tid*F)+(unsigned)(ITERATIONS*max_tid*F));
size_t size = N * sizeof(int);
// Allocate input vectors h_A and h_B in host memory
h_A = (int*)malloc(size);
if (h_A == 0) CleanupResources();
//h_B = (float*)malloc(size);
//if (h_B == 0) CleanupResources();
h_C = (int*)malloc(size);
if (h_C == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
//RandomInit(h_B, N);
// Allocate vectors in device memory
checkCudaErrors( hipMalloc((void**)&d_A, size) );
//checkCudaErrors( hipMalloc((void**)&d_B, size) );
checkCudaErrors( hipMalloc((void**)&d_C, size) );
// Copy vectors from host memory to device memory
checkCudaErrors( hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice) );
//checkCudaErrors( hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice) );
//VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
CUT_SAFE_CALL(cutCreateTimer(&my_timer));
TaskHandle taskhandle = LaunchDAQ();
CUT_SAFE_CALL(cutStartTimer(my_timer));
hipLaunchKernelGGL(( PowerKernal), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A, d_C, N);
CUDA_SAFE_CALL( hipDeviceSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
CUT_SAFE_CALL(cutStopTimer(my_timer));
CUT_SAFE_CALL(cutDeleteTimer(my_timer));
getLastCudaError("kernel launch failure");
#ifdef _DEBUG
checkCudaErrors( hipDeviceSynchronize() );
#endif
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost) );
CleanupResources();
return 0;
}
void CleanupResources(void){
// Free device memory
if (d_A)
hipFree(d_A);
//if (d_B)
// hipFree(d_B);
if (d_C)
hipFree(d_C);
// Free host memory
if (h_A)
free(h_A);
// if (h_B)
// free(h_B);
if (h_C)
free(h_C);
}
// Fills an integer array with values derived from rand() (integer division by RAND_MAX).
void RandomInit(int* data, int n){
for (int i = 0; i < n; ++i)
data[i] = (int)(rand() / RAND_MAX);
}
| 7a0d007024540d842ce5ac96a6349fcf24a25122.cu | #include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
#include <math.h>
// Includes
#include <stdio.h>
#include "../include/ContAcq-IntClk.h"
// includes, project
#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <cuda_runtime.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 15
#define F 2
#define ITERATIONS (unsigned)( 10000 )
#define ITERATIONS2 REPLACE_ITERATIONS
#define max_tid THREADS_PER_BLOCK*NUM_OF_BLOCKS
#define LINE_SIZE 128
#define SETS 64
#define ASSOC 6
#define SIMD_WIDTH 32
// Variables
int* h_A;
int* h_B;
int* h_C;
int* d_A;
int* d_B;
int* d_C;
bool noprompt = false;
unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit(int*, int);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(cudaError err, const char *file, const int line ){
if(cudaSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ){
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
// Device code
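// Power/stress microbenchmark kernel: each thread repeatedly stores a running
// value to strided global-memory locations. The input array A is allocated and
// copied over, but the read from it is commented out below.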
__global__ void PowerKernal(int* A, int* C, int N){
int tid = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
//int size = (LINE_SIZE*ASSOC*SETS)/sizeof(int);
//unsigned j=0, k=0;
int m_sum=N;
// m_sum = A[tid*F];
for(unsigned j=0; j<ITERATIONS2; j++){
for(unsigned k=0; k<ITERATIONS; ++k){
C[((unsigned)(tid*F)+(unsigned)(k*max_tid*F))]=m_sum;
}
m_sum+=j;
}
__syncthreads();
}
// Host code
int main(){
printf("Power Microbenchmarks\n");
//int N = LINE_SIZE*SETS*ASSOC;
unsigned N =((unsigned)(max_tid*F)+(unsigned)(ITERATIONS*max_tid*F));
size_t size = N * sizeof(int);
// Allocate input vectors h_A and h_B in host memory
h_A = (int*)malloc(size);
if (h_A == 0) CleanupResources();
//h_B = (float*)malloc(size);
//if (h_B == 0) CleanupResources();
h_C = (int*)malloc(size);
if (h_C == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
//RandomInit(h_B, N);
// Allocate vectors in device memory
checkCudaErrors( cudaMalloc((void**)&d_A, size) );
//checkCudaErrors( cudaMalloc((void**)&d_B, size) );
checkCudaErrors( cudaMalloc((void**)&d_C, size) );
// Copy vectors from host memory to device memory
checkCudaErrors( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) );
//checkCudaErrors( cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice) );
//VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
CUT_SAFE_CALL(cutCreateTimer(&my_timer));
TaskHandle taskhandle = LaunchDAQ();
CUT_SAFE_CALL(cutStartTimer(my_timer));
PowerKernal<<<dimGrid,dimBlock>>>(d_A, d_C, N);
CUDA_SAFE_CALL( cudaThreadSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
CUT_SAFE_CALL(cutStopTimer(my_timer));
CUT_SAFE_CALL(cutDeleteTimer(my_timer));
getLastCudaError("kernel launch failure");
#ifdef _DEBUG
checkCudaErrors( cudaDeviceSynchronize() );
#endif
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) );
CleanupResources();
return 0;
}
void CleanupResources(void){
// Free device memory
if (d_A)
cudaFree(d_A);
//if (d_B)
// cudaFree(d_B);
if (d_C)
cudaFree(d_C);
// Free host memory
if (h_A)
free(h_A);
// if (h_B)
// free(h_B);
if (h_C)
free(h_C);
}
// Fills an integer array with values derived from rand() (integer division by RAND_MAX).
void RandomInit(int* data, int n){
for (int i = 0; i < n; ++i)
data[i] = (int)(rand() / RAND_MAX);
}
|
3e36ff0657e9feb6b770ccb37c5364908df785e0.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*
*/
// TSP solver tests
// Author: Hugo Linsenmaier [email protected]
#include <utilities/high_res_clock.h>
#include <utilities/base_fixture.hpp>
#include <utilities/test_utilities.hpp>
#include <algorithms.hpp>
#include <graph.hpp>
#include <hip/hip_runtime_api.h>
#include <raft/error.hpp>
#include <raft/handle.hpp>
#include <rmm/device_uvector.hpp>
#include <fstream>
#include <set>
#include <vector>
typedef struct Tsp_Usecase_t {
std::string tsp_file;
float ref_cost;
Tsp_Usecase_t(const std::string& a, const float c)
{
// assume relative paths are relative to RAPIDS_DATASET_ROOT_DIR
const std::string& rapidsDatasetRootDir = cugraph::test::get_rapids_dataset_root_dir();
if ((a != "") && (a[0] != '/')) {
tsp_file = rapidsDatasetRootDir + "/" + a;
} else {
tsp_file = a;
}
ref_cost = c;
}
Tsp_Usecase_t& operator=(const Tsp_Usecase_t& rhs)
{
tsp_file = rhs.tsp_file;
ref_cost = rhs.ref_cost;
return *this;
}
} Tsp_Usecase;
static std::vector<Tsp_Usecase_t> euc_2d{
{"tsplib/datasets/a280.tsp", 2579}, {"tsplib/datasets/berlin52.tsp", 7542},
{"tsplib/datasets/bier127.tsp", 118282}, {"tsplib/datasets/ch130.tsp", 6110},
{"tsplib/datasets/ch150.tsp", 6528}, {"tsplib/datasets/d1291.tsp", 50801},
{"tsplib/datasets/d1655.tsp", 62128}, {"tsplib/datasets/d198.tsp", 15780},
{"tsplib/datasets/d2103.tsp", 80450}, {"tsplib/datasets/d493.tsp", 35002},
{"tsplib/datasets/d657.tsp", 48912}, {"tsplib/datasets/eil101.tsp", 629},
{"tsplib/datasets/eil51.tsp", 426}, {"tsplib/datasets/eil76.tsp", 538},
{"tsplib/datasets/fl1400.tsp", 20127}, {"tsplib/datasets/fl1577.tsp", 22249},
{"tsplib/datasets/fl417.tsp", 11861}, {"tsplib/datasets/gil262.tsp", 2378},
{"tsplib/datasets/kroA100.tsp", 21282}, {"tsplib/datasets/kroA150.tsp", 26524},
{"tsplib/datasets/kroA200.tsp", 29368}, {"tsplib/datasets/kroB100.tsp", 22141},
{"tsplib/datasets/kroB150.tsp", 26130}, {"tsplib/datasets/kroB200.tsp", 29437},
{"tsplib/datasets/kroC100.tsp", 20749}, {"tsplib/datasets/kroD100.tsp", 21294},
{"tsplib/datasets/kroE100.tsp", 22068}, {"tsplib/datasets/lin105.tsp", 14379},
{"tsplib/datasets/lin318.tsp", 42029}, {"tsplib/datasets/nrw1379.tsp", 56638},
{"tsplib/datasets/p654.tsp", 34643}, {"tsplib/datasets/pcb1173.tsp", 56892},
{"tsplib/datasets/pcb442.tsp", 50778}, {"tsplib/datasets/pr1002.tsp", 259045},
{"tsplib/datasets/pr107.tsp", 44303}, {"tsplib/datasets/pr136.tsp", 96772},
{"tsplib/datasets/pr144.tsp", 58537}, {"tsplib/datasets/pr152.tsp", 73682},
{"tsplib/datasets/pr226.tsp", 80369}, {"tsplib/datasets/pr264.tsp", 49135},
{"tsplib/datasets/pr299.tsp", 48191}, {"tsplib/datasets/pr439.tsp", 107217},
{"tsplib/datasets/pr76.tsp", 108159}, {"tsplib/datasets/rat195.tsp", 2323},
{"tsplib/datasets/rat575.tsp", 6773}, {"tsplib/datasets/rat783.tsp", 8806},
{"tsplib/datasets/rat99.tsp", 1211}, {"tsplib/datasets/rd100.tsp", 7910},
{"tsplib/datasets/rd400.tsp", 15281}, {"tsplib/datasets/rl1323.tsp", 270199},
{"tsplib/datasets/st70.tsp", 675}, {"tsplib/datasets/ts225.tsp", 126643},
{"tsplib/datasets/tsp225.tsp", 3916}, {"tsplib/datasets/u1060.tsp", 224094},
{"tsplib/datasets/u1432.tsp", 152970}, {"tsplib/datasets/u159.tsp", 42080},
{"tsplib/datasets/u574.tsp", 36905}, {"tsplib/datasets/u724.tsp", 41910},
{"tsplib/datasets/vm1084.tsp", 239297},
};
struct Route {
std::vector<int> cities;
std::vector<float> x_pos;
std::vector<float> y_pos;
};
class Tests_Tsp : public ::testing::TestWithParam<Tsp_Usecase> {
public:
Tests_Tsp() {}
static void SetupTestCase() {}
static void TearDownTestCase() {}
virtual void SetUp() {}
virtual void TearDown() {}
void run_current_test(const Tsp_Usecase& param)
{
const ::testing::TestInfo* const test_info =
::testing::UnitTest::GetInstance()->current_test_info();
std::stringstream ss;
std::string test_id = std::string(test_info->test_case_name()) + std::string(".") +
std::string(test_info->name()) + std::string("_") +
cugraph::test::getFileName(param.tsp_file) + std::string("_") +
ss.str().c_str();
float tol = 1E-1f;
HighResClock hr_clock;
double time_tmp;
Route input;
std::cout << "File: " << param.tsp_file.c_str() << "\n";
int nodes = load_tsp(param.tsp_file.c_str(), &input);
// Device alloc
raft::handle_t const handle;
auto stream = handle.get_stream();
rmm::device_uvector<int> vertices(static_cast<size_t>(nodes), stream);
rmm::device_uvector<int> route(static_cast<size_t>(nodes), stream);
rmm::device_uvector<float> x_pos(static_cast<size_t>(nodes), stream);
rmm::device_uvector<float> y_pos(static_cast<size_t>(nodes), stream);
int* vtx_ptr = vertices.data();
int* d_route = route.data();
float* d_x_pos = x_pos.data();
float* d_y_pos = y_pos.data();
CUDA_TRY(hipMemcpy(vtx_ptr, input.cities.data(), sizeof(int) * nodes, hipMemcpyHostToDevice));
CUDA_TRY(
hipMemcpy(d_x_pos, input.x_pos.data(), sizeof(float) * nodes, hipMemcpyHostToDevice));
CUDA_TRY(
hipMemcpy(d_y_pos, input.y_pos.data(), sizeof(float) * nodes, hipMemcpyHostToDevice));
// Default parameters
int restarts = 4096;
bool beam_search = true;
int k = 4;
int nstart = 0;
bool verbose = false;
hr_clock.start();
hipDeviceSynchronize();
hipProfilerStart();
float final_cost = cugraph::traveling_salesperson(
handle, vtx_ptr, d_x_pos, d_y_pos, nodes, restarts, beam_search, k, nstart, verbose, d_route);
hipProfilerStop();
hipDeviceSynchronize();
hr_clock.stop(&time_tmp);
std::vector<int> h_route;
h_route.resize(nodes);
std::vector<int> h_vertices;
h_vertices.resize(nodes);
CUDA_TRY(hipMemcpy(h_route.data(), d_route, sizeof(int) * nodes, hipMemcpyDeviceToHost));
hipDeviceSynchronize();
CUDA_TRY(hipMemcpy(h_vertices.data(), vtx_ptr, sizeof(int) * nodes, hipMemcpyDeviceToHost));
hipDeviceSynchronize();
std::cout << "tsp_time: " << time_tmp << " us" << std::endl;
std::cout << "Ref cost is: " << param.ref_cost << "\n";
std::cout << "Final cost is: " << final_cost << "\n";
float err = fabs(final_cost - param.ref_cost);
err /= param.ref_cost;
std::cout << "Approximation error is: " << err * 100 << "%\n";
EXPECT_LE(err, tol);
// Check route goes through each vertex once
size_t u_nodes = nodes;
std::set<int> node_set(h_route.begin(), h_route.end());
ASSERT_EQ(node_set.size(), u_nodes);
// Bound check
int max = *std::max_element(h_vertices.begin(), h_vertices.end());
int min = *std::min_element(h_vertices.begin(), h_vertices.end());
EXPECT_GE(*node_set.begin(), min);
EXPECT_LE(*node_set.rbegin(), max);
}
private:
std::vector<std::string> split(const std::string& s, char delimiter)
{
std::vector<std::string> tokens;
std::string token;
std::istringstream tokenStream(s);
while (std::getline(tokenStream, token, delimiter)) {
if (token.size() == 0) continue;
tokens.push_back(token);
}
return tokens;
}
// FIXME: At the moment TSP does not accept a graph_t as input and therefore
// deviates from the standard testing I/O pattern. Once other input types
// are supported we want to reconcile TSP testing with the rest of cugraph.
int load_tsp(const char* fname, Route* input)
{
std::fstream fs;
fs.open(fname);
std::string line;
std::vector<std::string> tokens;
int nodes = 0;
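    // First loop: scan the TSPLIB header ("KEY : VALUE" lines) for DIMENSION;
    // second loop: read the node coordinate lines ("id x y").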
while (std::getline(fs, line) && line.find(':') != std::string::npos) {
tokens = split(line, ':');
auto strip_token = split(tokens[0], ' ')[0];
if (strip_token == "DIMENSION") nodes = std::stof(tokens[1]);
}
while (std::getline(fs, line) && line.find(' ') != std::string::npos) {
tokens = split(line, ' ');
auto city_id = std::stof(tokens[0]);
auto x = std::stof(tokens[1]);
auto y = std::stof(tokens[2]);
input->cities.push_back(city_id);
input->x_pos.push_back(x);
input->y_pos.push_back(y);
}
fs.close();
assert(nodes == input->cities.size());
return nodes;
}
};
TEST_P(Tests_Tsp, CheckFP32_T) { run_current_test(GetParam()); }
INSTANTIATE_TEST_CASE_P(simple_test, Tests_Tsp, ::testing::ValuesIn(euc_2d));
CUGRAPH_TEST_PROGRAM_MAIN()
| 3e36ff0657e9feb6b770ccb37c5364908df785e0.cu | /*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*
*/
// TSP solver tests
// Author: Hugo Linsenmaier [email protected]
#include <utilities/high_res_clock.h>
#include <utilities/base_fixture.hpp>
#include <utilities/test_utilities.hpp>
#include <algorithms.hpp>
#include <graph.hpp>
#include <cuda_profiler_api.h>
#include <raft/error.hpp>
#include <raft/handle.hpp>
#include <rmm/device_uvector.hpp>
#include <fstream>
#include <set>
#include <vector>
typedef struct Tsp_Usecase_t {
std::string tsp_file;
float ref_cost;
Tsp_Usecase_t(const std::string& a, const float c)
{
// assume relative paths are relative to RAPIDS_DATASET_ROOT_DIR
const std::string& rapidsDatasetRootDir = cugraph::test::get_rapids_dataset_root_dir();
if ((a != "") && (a[0] != '/')) {
tsp_file = rapidsDatasetRootDir + "/" + a;
} else {
tsp_file = a;
}
ref_cost = c;
}
Tsp_Usecase_t& operator=(const Tsp_Usecase_t& rhs)
{
tsp_file = rhs.tsp_file;
ref_cost = rhs.ref_cost;
return *this;
}
} Tsp_Usecase;
static std::vector<Tsp_Usecase_t> euc_2d{
{"tsplib/datasets/a280.tsp", 2579}, {"tsplib/datasets/berlin52.tsp", 7542},
{"tsplib/datasets/bier127.tsp", 118282}, {"tsplib/datasets/ch130.tsp", 6110},
{"tsplib/datasets/ch150.tsp", 6528}, {"tsplib/datasets/d1291.tsp", 50801},
{"tsplib/datasets/d1655.tsp", 62128}, {"tsplib/datasets/d198.tsp", 15780},
{"tsplib/datasets/d2103.tsp", 80450}, {"tsplib/datasets/d493.tsp", 35002},
{"tsplib/datasets/d657.tsp", 48912}, {"tsplib/datasets/eil101.tsp", 629},
{"tsplib/datasets/eil51.tsp", 426}, {"tsplib/datasets/eil76.tsp", 538},
{"tsplib/datasets/fl1400.tsp", 20127}, {"tsplib/datasets/fl1577.tsp", 22249},
{"tsplib/datasets/fl417.tsp", 11861}, {"tsplib/datasets/gil262.tsp", 2378},
{"tsplib/datasets/kroA100.tsp", 21282}, {"tsplib/datasets/kroA150.tsp", 26524},
{"tsplib/datasets/kroA200.tsp", 29368}, {"tsplib/datasets/kroB100.tsp", 22141},
{"tsplib/datasets/kroB150.tsp", 26130}, {"tsplib/datasets/kroB200.tsp", 29437},
{"tsplib/datasets/kroC100.tsp", 20749}, {"tsplib/datasets/kroD100.tsp", 21294},
{"tsplib/datasets/kroE100.tsp", 22068}, {"tsplib/datasets/lin105.tsp", 14379},
{"tsplib/datasets/lin318.tsp", 42029}, {"tsplib/datasets/nrw1379.tsp", 56638},
{"tsplib/datasets/p654.tsp", 34643}, {"tsplib/datasets/pcb1173.tsp", 56892},
{"tsplib/datasets/pcb442.tsp", 50778}, {"tsplib/datasets/pr1002.tsp", 259045},
{"tsplib/datasets/pr107.tsp", 44303}, {"tsplib/datasets/pr136.tsp", 96772},
{"tsplib/datasets/pr144.tsp", 58537}, {"tsplib/datasets/pr152.tsp", 73682},
{"tsplib/datasets/pr226.tsp", 80369}, {"tsplib/datasets/pr264.tsp", 49135},
{"tsplib/datasets/pr299.tsp", 48191}, {"tsplib/datasets/pr439.tsp", 107217},
{"tsplib/datasets/pr76.tsp", 108159}, {"tsplib/datasets/rat195.tsp", 2323},
{"tsplib/datasets/rat575.tsp", 6773}, {"tsplib/datasets/rat783.tsp", 8806},
{"tsplib/datasets/rat99.tsp", 1211}, {"tsplib/datasets/rd100.tsp", 7910},
{"tsplib/datasets/rd400.tsp", 15281}, {"tsplib/datasets/rl1323.tsp", 270199},
{"tsplib/datasets/st70.tsp", 675}, {"tsplib/datasets/ts225.tsp", 126643},
{"tsplib/datasets/tsp225.tsp", 3916}, {"tsplib/datasets/u1060.tsp", 224094},
{"tsplib/datasets/u1432.tsp", 152970}, {"tsplib/datasets/u159.tsp", 42080},
{"tsplib/datasets/u574.tsp", 36905}, {"tsplib/datasets/u724.tsp", 41910},
{"tsplib/datasets/vm1084.tsp", 239297},
};
struct Route {
std::vector<int> cities;
std::vector<float> x_pos;
std::vector<float> y_pos;
};
class Tests_Tsp : public ::testing::TestWithParam<Tsp_Usecase> {
public:
Tests_Tsp() {}
static void SetupTestCase() {}
static void TearDownTestCase() {}
virtual void SetUp() {}
virtual void TearDown() {}
void run_current_test(const Tsp_Usecase& param)
{
const ::testing::TestInfo* const test_info =
::testing::UnitTest::GetInstance()->current_test_info();
std::stringstream ss;
std::string test_id = std::string(test_info->test_case_name()) + std::string(".") +
std::string(test_info->name()) + std::string("_") +
cugraph::test::getFileName(param.tsp_file) + std::string("_") +
ss.str().c_str();
float tol = 1E-1f;
HighResClock hr_clock;
double time_tmp;
Route input;
std::cout << "File: " << param.tsp_file.c_str() << "\n";
int nodes = load_tsp(param.tsp_file.c_str(), &input);
// Device alloc
raft::handle_t const handle;
auto stream = handle.get_stream();
rmm::device_uvector<int> vertices(static_cast<size_t>(nodes), stream);
rmm::device_uvector<int> route(static_cast<size_t>(nodes), stream);
rmm::device_uvector<float> x_pos(static_cast<size_t>(nodes), stream);
rmm::device_uvector<float> y_pos(static_cast<size_t>(nodes), stream);
int* vtx_ptr = vertices.data();
int* d_route = route.data();
float* d_x_pos = x_pos.data();
float* d_y_pos = y_pos.data();
CUDA_TRY(cudaMemcpy(vtx_ptr, input.cities.data(), sizeof(int) * nodes, cudaMemcpyHostToDevice));
CUDA_TRY(
cudaMemcpy(d_x_pos, input.x_pos.data(), sizeof(float) * nodes, cudaMemcpyHostToDevice));
CUDA_TRY(
cudaMemcpy(d_y_pos, input.y_pos.data(), sizeof(float) * nodes, cudaMemcpyHostToDevice));
// Default parameters
int restarts = 4096;
bool beam_search = true;
int k = 4;
int nstart = 0;
bool verbose = false;
hr_clock.start();
cudaDeviceSynchronize();
cudaProfilerStart();
float final_cost = cugraph::traveling_salesperson(
handle, vtx_ptr, d_x_pos, d_y_pos, nodes, restarts, beam_search, k, nstart, verbose, d_route);
cudaProfilerStop();
cudaDeviceSynchronize();
hr_clock.stop(&time_tmp);
std::vector<int> h_route;
h_route.resize(nodes);
std::vector<int> h_vertices;
h_vertices.resize(nodes);
CUDA_TRY(cudaMemcpy(h_route.data(), d_route, sizeof(int) * nodes, cudaMemcpyDeviceToHost));
cudaDeviceSynchronize();
CUDA_TRY(cudaMemcpy(h_vertices.data(), vtx_ptr, sizeof(int) * nodes, cudaMemcpyDeviceToHost));
cudaDeviceSynchronize();
std::cout << "tsp_time: " << time_tmp << " us" << std::endl;
std::cout << "Ref cost is: " << param.ref_cost << "\n";
std::cout << "Final cost is: " << final_cost << "\n";
float err = fabs(final_cost - param.ref_cost);
err /= param.ref_cost;
std::cout << "Approximation error is: " << err * 100 << "%\n";
EXPECT_LE(err, tol);
// Check route goes through each vertex once
size_t u_nodes = nodes;
std::set<int> node_set(h_route.begin(), h_route.end());
ASSERT_EQ(node_set.size(), u_nodes);
// Bound check
int max = *std::max_element(h_vertices.begin(), h_vertices.end());
int min = *std::min_element(h_vertices.begin(), h_vertices.end());
EXPECT_GE(*node_set.begin(), min);
EXPECT_LE(*node_set.rbegin(), max);
}
private:
std::vector<std::string> split(const std::string& s, char delimiter)
{
std::vector<std::string> tokens;
std::string token;
std::istringstream tokenStream(s);
while (std::getline(tokenStream, token, delimiter)) {
if (token.size() == 0) continue;
tokens.push_back(token);
}
return tokens;
}
// FIXME: At the moment TSP does not accept a graph_t as input and therefore
// deviates from the standard testing I/O pattern. Once other input types
// are supported we want to reconcile TSP testing with the rest of cugraph.
int load_tsp(const char* fname, Route* input)
{
std::fstream fs;
fs.open(fname);
std::string line;
std::vector<std::string> tokens;
int nodes = 0;
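    // First loop: scan the TSPLIB header ("KEY : VALUE" lines) for DIMENSION;
    // second loop: read the node coordinate lines ("id x y").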
while (std::getline(fs, line) && line.find(':') != std::string::npos) {
tokens = split(line, ':');
auto strip_token = split(tokens[0], ' ')[0];
if (strip_token == "DIMENSION") nodes = std::stof(tokens[1]);
}
while (std::getline(fs, line) && line.find(' ') != std::string::npos) {
tokens = split(line, ' ');
auto city_id = std::stof(tokens[0]);
auto x = std::stof(tokens[1]);
auto y = std::stof(tokens[2]);
input->cities.push_back(city_id);
input->x_pos.push_back(x);
input->y_pos.push_back(y);
}
fs.close();
assert(nodes == input->cities.size());
return nodes;
}
};
TEST_P(Tests_Tsp, CheckFP32_T) { run_current_test(GetParam()); }
INSTANTIATE_TEST_CASE_P(simple_test, Tests_Tsp, ::testing::ValuesIn(euc_2d));
CUGRAPH_TEST_PROGRAM_MAIN()
|
b97e7a54c69f25af3e5df5cb526943cdcc04d449.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/inference/tensorrt/plugin/recover_padding_plugin.h"
namespace paddle {
namespace inference {
namespace tensorrt {
namespace plugin {
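// Scatters a packed (padding-free) sequence tensor back into a padded
// [batch, max_seq_len, hidden] layout: input1 holds prefix sums of the
// sequence lengths, and positions past each sequence's length are zero-filled.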
__global__ void RecoverPaddingKernel(const float* input0,
const int32_t* input1,
float* output) {
int word_id = blockIdx.x * gridDim.y + blockIdx.y;
int32_t seqence_length = input1[blockIdx.x + 1] - input1[blockIdx.x];
if (blockIdx.y < seqence_length) {
output[word_id * gridDim.z * blockDim.x + blockIdx.z * blockDim.x +
threadIdx.x] =
input0[(input1[blockIdx.x] + blockIdx.y) * gridDim.z * blockDim.x +
blockIdx.z * blockDim.x + threadIdx.x];
} else {
output[word_id * gridDim.z * blockDim.x + blockIdx.z * blockDim.x +
threadIdx.x] = 0;
}
}
nvinfer1::DataType RecoverPaddingPlugin::getOutputDataType(
int index,
const nvinfer1::DataType* input_types,
int nb_inputs) const TRT_NOEXCEPT {
return input_types[0];
}
nvinfer1::DimsExprs RecoverPaddingPlugin::getOutputDimensions(
int outputIndex,
const nvinfer1::DimsExprs* inputs,
int nbInputs,
nvinfer1::IExprBuilder& exprBuilder) TRT_NOEXCEPT {
nvinfer1::DimsExprs output_dims{};
output_dims.nbDims = 3;
const auto* one = exprBuilder.constant(1);
output_dims.d[0] = exprBuilder.operation(
nvinfer1::DimensionOperation::kSUB, *inputs[1].d[0], *one);
output_dims.d[1] = inputs[2].d[1];
output_dims.d[2] = inputs[0].d[1];
return output_dims;
}
bool RecoverPaddingPlugin::supportsFormatCombination(
int pos,
const nvinfer1::PluginTensorDesc* inOut,
int nbInputs,
int nbOutputs) TRT_NOEXCEPT {
PADDLE_ENFORCE_EQ(nbInputs,
3,
platform::errors::InvalidArgument("Must have 3 inputs, "
"but got %d input(s). ",
nbInputs));
PADDLE_ENFORCE_EQ(nbOutputs,
getNbOutputs(),
platform::errors::InvalidArgument("Must have 1 output, "
"but got %d output(s). ",
nbOutputs));
if (pos == 1) { // PosId
return inOut[pos].type == nvinfer1::DataType::kINT32 &&
inOut[pos].format == nvinfer1::TensorFormat::kLINEAR;
} else if (pos == 2) { // mask_id
return inOut[pos].type == nvinfer1::DataType::kFLOAT &&
inOut[pos].format == nvinfer1::TensorFormat::kLINEAR;
} else {
return inOut[pos].type == nvinfer1::DataType::kFLOAT &&
inOut[pos].format == nvinfer1::TensorFormat::kLINEAR;
}
// return (inOut[pos].type == nvinfer1::DataType::kFLOAT && inOut[pos].format
// == nvinfer1::TensorFormat::kLINEAR)||
// (inOut[pos].type == nvinfer1::DataType::kHALF && inOut[pos].format ==
// nvinfer1::TensorFormat::kLINEAR)||
// (inOut[pos].type == nvinfer1::DataType::kINT8 && inOut[pos].format ==
// nvinfer1::TensorFormat::kCHW32);
}
void RecoverPaddingPlugin::configurePlugin(
const nvinfer1::DynamicPluginTensorDesc* inputs,
int nbInputs,
const nvinfer1::DynamicPluginTensorDesc* outputs,
int nbOutputs) TRT_NOEXCEPT {}
void RecoverPaddingPlugin::attachToContext(
cudnnContext* cudnnContext,
cublasContext* cublasContext,
nvinfer1::IGpuAllocator* gpuAllocator) TRT_NOEXCEPT {}
void RecoverPaddingPlugin::detachFromContext() TRT_NOEXCEPT {}
void RecoverPaddingPlugin::terminate() TRT_NOEXCEPT {}
int RecoverPaddingPlugin::enqueue(const nvinfer1::PluginTensorDesc* inputDesc,
const nvinfer1::PluginTensorDesc* outputDesc,
const void* const* inputs,
void* const* outputs,
void* workspace,
hipStream_t stream) TRT_NOEXCEPT {
const auto input0_desc = inputDesc[0];
const auto input1_desc = inputDesc[1];
const auto input2_desc = inputDesc[2];
const float* input0 = static_cast<const float*>(inputs[0]);
const int32_t* input1 =
static_cast<const int32_t*>(inputs[1]); // pos_id_tensor
float* output = static_cast<float*>(outputs[0]);
const int32_t num_threads = 256;
const dim3 num_blocks(
input1_desc.dims.d[0] - 1,
input2_desc.dims.d[1],
input0_desc.dims.d[1] / num_threads); // batchs, max sequnce length
// (mask_id.dims.d[1]),
// input.dims.d[1]/256
hipLaunchKernelGGL(( RecoverPaddingKernel), dim3(num_blocks), dim3(num_threads), 0, stream,
input0, input1, output);
return hipGetLastError() != hipSuccess;
}
} // namespace plugin
} // namespace tensorrt
} // namespace inference
} // namespace paddle
| b97e7a54c69f25af3e5df5cb526943cdcc04d449.cu | /* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/inference/tensorrt/plugin/recover_padding_plugin.h"
namespace paddle {
namespace inference {
namespace tensorrt {
namespace plugin {
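// Scatters a packed (padding-free) sequence tensor back into a padded
// [batch, max_seq_len, hidden] layout: input1 holds prefix sums of the
// sequence lengths, and positions past each sequence's length are zero-filled.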
__global__ void RecoverPaddingKernel(const float* input0,
const int32_t* input1,
float* output) {
int word_id = blockIdx.x * gridDim.y + blockIdx.y;
int32_t seqence_length = input1[blockIdx.x + 1] - input1[blockIdx.x];
if (blockIdx.y < seqence_length) {
output[word_id * gridDim.z * blockDim.x + blockIdx.z * blockDim.x +
threadIdx.x] =
input0[(input1[blockIdx.x] + blockIdx.y) * gridDim.z * blockDim.x +
blockIdx.z * blockDim.x + threadIdx.x];
} else {
output[word_id * gridDim.z * blockDim.x + blockIdx.z * blockDim.x +
threadIdx.x] = 0;
}
}
nvinfer1::DataType RecoverPaddingPlugin::getOutputDataType(
int index,
const nvinfer1::DataType* input_types,
int nb_inputs) const TRT_NOEXCEPT {
return input_types[0];
}
nvinfer1::DimsExprs RecoverPaddingPlugin::getOutputDimensions(
int outputIndex,
const nvinfer1::DimsExprs* inputs,
int nbInputs,
nvinfer1::IExprBuilder& exprBuilder) TRT_NOEXCEPT {
nvinfer1::DimsExprs output_dims{};
output_dims.nbDims = 3;
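  // Output shape: (number of sequences = len(pos_id) - 1,
  //                max sequence length taken from mask_id,
  //                hidden size taken from the packed input).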
const auto* one = exprBuilder.constant(1);
output_dims.d[0] = exprBuilder.operation(
nvinfer1::DimensionOperation::kSUB, *inputs[1].d[0], *one);
output_dims.d[1] = inputs[2].d[1];
output_dims.d[2] = inputs[0].d[1];
return output_dims;
}
bool RecoverPaddingPlugin::supportsFormatCombination(
int pos,
const nvinfer1::PluginTensorDesc* inOut,
int nbInputs,
int nbOutputs) TRT_NOEXCEPT {
PADDLE_ENFORCE_EQ(nbInputs,
3,
platform::errors::InvalidArgument("Must have 3 inputs, "
"but got %d input(s). ",
nbInputs));
PADDLE_ENFORCE_EQ(nbOutputs,
getNbOutputs(),
platform::errors::InvalidArgument("Must have 1 output, "
"but got %d output(s). ",
nbOutputs));
if (pos == 1) { // PosId
return inOut[pos].type == nvinfer1::DataType::kINT32 &&
inOut[pos].format == nvinfer1::TensorFormat::kLINEAR;
} else if (pos == 2) { // mask_id
return inOut[pos].type == nvinfer1::DataType::kFLOAT &&
inOut[pos].format == nvinfer1::TensorFormat::kLINEAR;
} else {
return inOut[pos].type == nvinfer1::DataType::kFLOAT &&
inOut[pos].format == nvinfer1::TensorFormat::kLINEAR;
}
// return (inOut[pos].type == nvinfer1::DataType::kFLOAT && inOut[pos].format
// == nvinfer1::TensorFormat::kLINEAR)||
// (inOut[pos].type == nvinfer1::DataType::kHALF && inOut[pos].format ==
// nvinfer1::TensorFormat::kLINEAR)||
// (inOut[pos].type == nvinfer1::DataType::kINT8 && inOut[pos].format ==
// nvinfer1::TensorFormat::kCHW32);
}
void RecoverPaddingPlugin::configurePlugin(
const nvinfer1::DynamicPluginTensorDesc* inputs,
int nbInputs,
const nvinfer1::DynamicPluginTensorDesc* outputs,
int nbOutputs) TRT_NOEXCEPT {}
void RecoverPaddingPlugin::attachToContext(
cudnnContext* cudnnContext,
cublasContext* cublasContext,
nvinfer1::IGpuAllocator* gpuAllocator) TRT_NOEXCEPT {}
void RecoverPaddingPlugin::detachFromContext() TRT_NOEXCEPT {}
void RecoverPaddingPlugin::terminate() TRT_NOEXCEPT {}
int RecoverPaddingPlugin::enqueue(const nvinfer1::PluginTensorDesc* inputDesc,
const nvinfer1::PluginTensorDesc* outputDesc,
const void* const* inputs,
void* const* outputs,
void* workspace,
cudaStream_t stream) TRT_NOEXCEPT {
const auto input0_desc = inputDesc[0];
const auto input1_desc = inputDesc[1];
const auto input2_desc = inputDesc[2];
const float* input0 = static_cast<const float*>(inputs[0]);
const int32_t* input1 =
static_cast<const int32_t*>(inputs[1]); // pos_id_tensor
float* output = static_cast<float*>(outputs[0]);
const int32_t num_threads = 256;
const dim3 num_blocks(
input1_desc.dims.d[0] - 1,
input2_desc.dims.d[1],
      input0_desc.dims.d[1] / num_threads);  // batches, max sequence length
                                             // (mask_id.dims.d[1]),
                                             // input.dims.d[1]/256
RecoverPaddingKernel<<<num_blocks, num_threads, 0, stream>>>(
input0, input1, output);
return cudaGetLastError() != cudaSuccess;
}
} // namespace plugin
} // namespace tensorrt
} // namespace inference
} // namespace paddle
|
4c12157aede28fd6cf7ecb6383c183ce2e8e0336.hip | // !!! This is a file automatically generated by hipify!!!
#include <GraphMol/GraphMol.h>
#include <GraphMol/FileParsers/MolSupplier.h>
#include <GraphMol/FileParsers/MolWriters.h>
#include <GraphMol/FileParsers/FileParsers.h>
#include <GraphMol/RDKitBase.h>
#include <GraphMol/SmilesParse/SmilesWrite.h>
#include <RDGeneral/FileParseException.h>
#include <RDGeneral/BadFileException.h>
#include <iostream>
#include <algorithm>
#include <vector>
#include <string>
#include <stdio.h>
#include <chrono>
#include "math_constants.h"
#include "hip/hip_runtime.h"
#include "hip/hip_runtime_api.h"
#include "hip/hip_runtime.h"
#include "helper.h"
#define NUM_OF_BLOCKS 360
using namespace RDKit;
using namespace std;
/**
* Struct used to keep track of the max result found.
 * It keeps track of the distance, the angle and the rotamer. Kept from an older version and for future expansions,
 * it also keeps track of the rotated positions of the first half of the molecule.
**/
struct max_value{
double distance;
int angle;
Rotamer rt;
atom_st* rot_mol_fst_half;
};
/**
* Compute the unit quaternion used in the computation of the rotation matrix.
 * Each thread computes one unit quaternion.
 *
 * @param res Array with the results.
 * @param quaternion The vector (rotation axis) that the atoms must rotate around.
**/
__global__ void compute_unit_quaternions(double4* res, double3 quaternion){
int tid = threadIdx.x;// + blockIdx.x*gridDim.x;
double norm;
double x , y ,z;
double angle;
double sin_2 , cos_2;
//compute the norm of the vector.
norm = norm3d(quaternion.x, quaternion.y,quaternion.z);
if(tid < 360){
x = quaternion.x/norm;
y = quaternion.y/norm;
z = quaternion.z/norm;
angle = CUDART_PI/180 * tid;
sin_2 = sin(angle/2);
cos_2 = cos(angle/2);
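        // Unit quaternion for a rotation by `angle` around the normalized axis (x, y, z):
        // q = (x*sin(angle/2), y*sin(angle/2), z*sin(angle/2), cos(angle/2)).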
        res[tid] = make_double4(x*sin_2, y*sin_2 , z*sin_2 , cos_2); // computed according to the quaternion formula explained in the report.
}
}
/**
 * Main function of the code. It parses the file and retrieves all the necessary data for the computation.
 * It takes as input the mol2 file that describes the molecule.
**/
int main(int argc, char** argv){
std::string mol_file = argv[1];
std::vector<Rotamer> rotamers;
std::vector<atom_st> atoms;
//RWMol *m = Mol2FileToMol( mol_file );
//std::shared_ptr<RDKit::ROMol>const mol( RDKit::Mol2FileToMol( mol_file,true,false,CORINA,false ) );
/**
     * The following initialization works with the aspirin mol2 file provided by the Professor.
* The declaration above works only with the file found online.
*/
std::shared_ptr<RDKit::ROMol>const mol( RDKit::Mol2FileToMol( mol_file,false,true,CORINA,false ) );
    /** The next line reads the molecule removing the H atoms; it reduces the number of possible rotors
     * for the aspirin and seems to work, but it is unclear whether it works with other molecules, so for now I keep
     * more rotors, even though this may be the right solution.
     */
//std::shared_ptr<RDKit::ROMol> mol( RDKit::Mol2FileToMol( mol_file,true,true,CORINA,false ) );
// Initialize the graph.
Graph graph = Graph(mol->getNumAtoms());
auto conf = mol->getConformer();
std::cout << "number of bonds: " << mol->getNumBonds() << '\n';// mol2->getNumBonds() << '\n';
if( !mol->getRingInfo()->isInitialized() ) {
RDKit::MolOps::findSSSR( *mol );
}
//for( unsigned int i = 0; i < mol->getNumBonds() ; i++ ) {
// const RDKit::Bond *bond = mol->getBondWithIdx( i );
//}
    // Get all the bonds in the mol and add the valid ones to the rotamers' vector.
    // Since bonds in rings and double bonds are not considered useful for
    // the rotation, they are discarded.
for( unsigned int i = 0; i < mol->getNumBonds() ; i++ ) {
const RDKit::Bond *bond = mol->getBondWithIdx( i );
unsigned int startingAtom, endingAtom;
startingAtom = bond->getBeginAtomIdx();
endingAtom = bond->getEndAtomIdx();
graph.addEdge(startingAtom,endingAtom);
if( mol->getRingInfo()->numBondRings( bond->getIdx() )) {
//continue;
std::cout << "Bond " << bond->getIdx() << " is in a ring " << "stAtom: " << startingAtom << " endAtom: " << endingAtom << endl;
}
else if(bond->getBondType() == RDKit::Bond::BondType::DOUBLE){
//continue;
std::cout << "Bond " << bond->getIdx() << " is a DOUBLE bond " << "stAtom: " << startingAtom << " endAtom: " << endingAtom << endl;
}
else{
unsigned int id = bond->getIdx();
atom_st beginAtom;
atom_st endAtom;
beginAtom.id = startingAtom;
endAtom.id = endingAtom;
auto tmp_pos = conf.getAtomPos(beginAtom.id);
beginAtom.position = make_double3(tmp_pos[0],tmp_pos[1],tmp_pos[2]);
tmp_pos = conf.getAtomPos(endAtom.id);
endAtom.position = make_double3(tmp_pos[0],tmp_pos[1],tmp_pos[2]);
Rotamer rt = Rotamer(*bond,id, beginAtom, endAtom);
rotamers.push_back(rt);
}
}
// Add all the atoms to the atoms' vector
for(auto atom : mol->atoms()){
uint id = atom->getIdx();
auto pos_tmp = conf.getAtomPos(id);
double3 pos = make_double3(pos_tmp[0],pos_tmp[1],pos_tmp[2]);
atom_st at;
at.id = id;
at.position = pos;
atoms.push_back(at);
}
//Initialize the result storing structure.
max_value max_dist;
max_dist.distance = 0;
vector<unsigned int> first_half;
vector<unsigned int> second_half;
//Rotamer rt = rotamers[0];
//vector<Rotamer> tmp_rotamers ={rotamers[0], rotamers[1]};
auto start = std::chrono::high_resolution_clock::now();
// Cycle through all the available rotamers
for(auto rt : rotamers){
bool analize;
        // Remove the edge/bond being analyzed
graph.removeEdge(rt.getBeginAtom().id, rt.getEndingAtom().id);
        // Compute the two halves of the split molecule.
graph.DFSlinkedNode(rt.getBeginAtom().id, first_half);
graph.DFSlinkedNode(rt.getEndingAtom().id, second_half);
vector<atom_st> atoms_first_half;
vector<atom_st> atoms_second_half;
for(auto i: first_half) atoms_first_half.push_back(atoms[i]);
for(auto i : second_half) atoms_second_half.push_back(atoms[i]);
max_value max_first_half;
max_first_half.distance = 0;
max_value max_second_half;
max_second_half.distance = 0;
Rotation r;
        // If removing the bond creates a half with only one atom, the bond is not a rotamer,
        // so I don't rotate around it and skip the computation.
if(atoms_first_half.size() > 1 && second_half.size() > 1){
analize = true;
cout << "Checking rotamer: " << rt.getBond().getIdx() << " ";
cout << "Starting Atom: " << rt.getBeginAtom().id << " Ending Atom: " << rt.getEndingAtom().id << " ";
cout << "number of atom in first half: " << atoms_first_half.size() << endl;
vector<atom_st> distance_to_compute;
double4* unit_quaternions;
hipMallocManaged(&unit_quaternions, 360*sizeof(double4));
int deviceId;
hipGetDevice(&deviceId);
hipMemPrefetchAsync(unit_quaternions,360*sizeof(double4),deviceId);
double3 tmp_vector = rt.getVector();
            // The computation of the unit quaternions is done in parallel for all
            // the angles, launching the kernel with 360 threads, one for each angle.
hipLaunchKernelGGL(( compute_unit_quaternions), dim3(1),dim3(360), 0, 0, unit_quaternions,tmp_vector);
hipDeviceSynchronize();
double max = 0;
double* res;
for(int c = 0; c < 360; c += NUM_OF_BLOCKS ){
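                // NUM_OF_BLOCKS rotation angles are processed per kernel launch;
                // with NUM_OF_BLOCKS == 360 this outer loop runs exactly once.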
vector<vector<atom_st>> rot_first_half;
double3 tmp = rt.getBeginAtom().position;
                // Compute the rotation and store the result
rot_first_half = r.rotate_v5(c , atoms_first_half, tmp, unit_quaternions);
                // Add all the elements of the vector of vectors into a single vector with all the atoms.
                // The atoms are ordered by angle of rotation, and for each rotation the atoms
                // of the second half of the molecule are appended, in order to compute the internal distance.
for(int rotation = 0; rotation < NUM_OF_BLOCKS; rotation++){
//cout << "main line " << __LINE__ << endl;
for(int i = 0; i < atoms_first_half.size(); i++){
distance_to_compute.push_back(rot_first_half[rotation][i]);
}
//cout << "main line " << __LINE__ << endl;
for(atom_st at : atoms_second_half){
distance_to_compute.push_back(at);
}
}
// Compute the internal distance, storing the result in res.
res = distance_v3(distance_to_compute, atoms.size(), NUM_OF_BLOCKS);
// Select the rotation that has the highest internal distance,
// cycling through the results stored in res.
for(int i = 0; i < NUM_OF_BLOCKS;i++){
if(res[i] > max_first_half.distance) {
max_first_half.distance = res[i];
max_first_half.angle = c+i;
max_first_half.rt = rt;
}
}
distance_to_compute.clear();
rot_first_half.clear();
}
printf("Computed distance for the first part,\n");
printf("the max distance compute is %lf with angle %d around rotamer: %d\n", \
max_first_half.distance, max_first_half.angle,max_first_half.rt.getBond().getIdx());
hipFree(unit_quaternions);
}
else{
analize = false;
printf("Checking rotamer %d ... ", rt.getBond().getIdx());
printf("Too few atoms in the partition, rotamer not analized\n");
}
double total = max_first_half.distance + max_second_half.distance;
if(total > max_dist.distance){
max_dist.distance = total;
max_dist.rt = max_first_half.rt;
max_dist.angle = max_first_half.angle;
}
first_half.clear();
second_half.clear();
atoms_first_half.clear();
atoms_second_half.clear();
        // Add back the edge corresponding to the bond before processing another bond/rotamer.
graph.addEdge(rt.getBeginAtom().id,rt.getEndingAtom().id);
if(analize)
printf("For Rotamer %d, the max distance computed is: %lf,\n with a first angle: %d \n",\
rt.getBond().getIdx(),total,max_first_half.angle);
}
auto stop = std::chrono::high_resolution_clock::now();
auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(stop-start);
cout << "duration time[ms]: " << duration.count() << endl;
printf("The maximum distance computed is %lf\n", max_dist.distance);
printf("Computed with an angle of %d, around the rotamer %d\n",max_dist.angle,max_dist.rt.getBond().getIdx());
return 0;
} | 4c12157aede28fd6cf7ecb6383c183ce2e8e0336.cu |
#include <GraphMol/GraphMol.h>
#include <GraphMol/FileParsers/MolSupplier.h>
#include <GraphMol/FileParsers/MolWriters.h>
#include <GraphMol/FileParsers/FileParsers.h>
#include <GraphMol/RDKitBase.h>
#include <GraphMol/SmilesParse/SmilesWrite.h>
#include <RDGeneral/FileParseException.h>
#include <RDGeneral/BadFileException.h>
#include <iostream>
#include <algorithm>
#include <vector>
#include <string>
#include <stdio.h>
#include <chrono>
#include "math_constants.h"
#include "cuda.h"
#include "cuda_runtime_api.h"
#include "cuda_runtime.h"
#include "helper.h"
#define NUM_OF_BLOCKS 360
using namespace RDKit;
using namespace std;
/**
* Struct used to keep track of the max result found.
 * It keeps track of the distance, the angle and the rotamer. Kept from an older version and for future expansions,
 * it also keeps track of the rotated positions of the first half of the molecule.
**/
struct max_value{
double distance;
int angle;
Rotamer rt;
atom_st* rot_mol_fst_half;
};
/**
* Compute the unit quaternion used in the computation of the rotation matrix.
 * Each thread computes one unit quaternion.
 *
 * @param res Array with the results.
 * @param quaternion The vector (rotation axis) that the atoms must rotate around.
**/
__global__ void compute_unit_quaternions(double4* res, double3 quaternion){
int tid = threadIdx.x;// + blockIdx.x*gridDim.x;
double norm;
double x , y ,z;
double angle;
double sin_2 , cos_2;
//compute the norm of the vector.
norm = norm3d(quaternion.x, quaternion.y,quaternion.z);
if(tid < 360){
x = quaternion.x/norm;
y = quaternion.y/norm;
z = quaternion.z/norm;
angle = CUDART_PI/180 * tid;
sin_2 = sin(angle/2);
cos_2 = cos(angle/2);
        res[tid] = make_double4(x*sin_2, y*sin_2 , z*sin_2 , cos_2); // computed according to the quaternion formula explained in the report.
}
}
/**
 * Main function of the code. It parses the file and retrieves all the necessary data for the computation.
 * It takes as input the mol2 file that describes the molecule.
**/
int main(int argc, char** argv){
std::string mol_file = argv[1];
std::vector<Rotamer> rotamers;
std::vector<atom_st> atoms;
//RWMol *m = Mol2FileToMol( mol_file );
//std::shared_ptr<RDKit::ROMol>const mol( RDKit::Mol2FileToMol( mol_file,true,false,CORINA,false ) );
/**
     * The following initialization works with the aspirin mol2 file provided by the Professor.
* The declaration above works only with the file found online.
*/
std::shared_ptr<RDKit::ROMol>const mol( RDKit::Mol2FileToMol( mol_file,false,true,CORINA,false ) );
    /** The next line reads the molecule removing the H atoms; it reduces the number of possible rotors
     * for the aspirin and seems to work, but it is unclear whether it works with other molecules, so for now I keep
     * more rotors, even though this may be the right solution.
     */
//std::shared_ptr<RDKit::ROMol> mol( RDKit::Mol2FileToMol( mol_file,true,true,CORINA,false ) );
// Initialize the graph.
Graph graph = Graph(mol->getNumAtoms());
auto conf = mol->getConformer();
std::cout << "number of bonds: " << mol->getNumBonds() << '\n';// mol2->getNumBonds() << '\n';
if( !mol->getRingInfo()->isInitialized() ) {
RDKit::MolOps::findSSSR( *mol );
}
//for( unsigned int i = 0; i < mol->getNumBonds() ; i++ ) {
// const RDKit::Bond *bond = mol->getBondWithIdx( i );
//}
    // Get all the bonds in the mol and add the valid ones to the rotamers' vector.
    // Since bonds in rings and double bonds are not considered useful for
    // the rotation, they are discarded.
for( unsigned int i = 0; i < mol->getNumBonds() ; i++ ) {
const RDKit::Bond *bond = mol->getBondWithIdx( i );
unsigned int startingAtom, endingAtom;
startingAtom = bond->getBeginAtomIdx();
endingAtom = bond->getEndAtomIdx();
graph.addEdge(startingAtom,endingAtom);
if( mol->getRingInfo()->numBondRings( bond->getIdx() )) {
//continue;
std::cout << "Bond " << bond->getIdx() << " is in a ring " << "stAtom: " << startingAtom << " endAtom: " << endingAtom << endl;
}
else if(bond->getBondType() == RDKit::Bond::BondType::DOUBLE){
//continue;
std::cout << "Bond " << bond->getIdx() << " is a DOUBLE bond " << "stAtom: " << startingAtom << " endAtom: " << endingAtom << endl;
}
else{
unsigned int id = bond->getIdx();
atom_st beginAtom;
atom_st endAtom;
beginAtom.id = startingAtom;
endAtom.id = endingAtom;
auto tmp_pos = conf.getAtomPos(beginAtom.id);
beginAtom.position = make_double3(tmp_pos[0],tmp_pos[1],tmp_pos[2]);
tmp_pos = conf.getAtomPos(endAtom.id);
endAtom.position = make_double3(tmp_pos[0],tmp_pos[1],tmp_pos[2]);
Rotamer rt = Rotamer(*bond,id, beginAtom, endAtom);
rotamers.push_back(rt);
}
}
// Add all the atoms to the atoms' vector
for(auto atom : mol->atoms()){
uint id = atom->getIdx();
auto pos_tmp = conf.getAtomPos(id);
double3 pos = make_double3(pos_tmp[0],pos_tmp[1],pos_tmp[2]);
atom_st at;
at.id = id;
at.position = pos;
atoms.push_back(at);
}
//Initialize the result storing structure.
max_value max_dist;
max_dist.distance = 0;
vector<unsigned int> first_half;
vector<unsigned int> second_half;
//Rotamer rt = rotamers[0];
//vector<Rotamer> tmp_rotamers ={rotamers[0], rotamers[1]};
auto start = std::chrono::high_resolution_clock::now();
// Cycle through all the available rotamers
for(auto rt : rotamers){
bool analize;
        // Remove the edge/bond being analyzed
graph.removeEdge(rt.getBeginAtom().id, rt.getEndingAtom().id);
        // Compute the two halves of the split molecule.
graph.DFSlinkedNode(rt.getBeginAtom().id, first_half);
graph.DFSlinkedNode(rt.getEndingAtom().id, second_half);
vector<atom_st> atoms_first_half;
vector<atom_st> atoms_second_half;
for(auto i: first_half) atoms_first_half.push_back(atoms[i]);
for(auto i : second_half) atoms_second_half.push_back(atoms[i]);
max_value max_first_half;
max_first_half.distance = 0;
max_value max_second_half;
max_second_half.distance = 0;
Rotation r;
        // If removing the bond creates a half with only one atom, the bond is not a rotamer,
        // so I don't rotate around it and skip the computation.
if(atoms_first_half.size() > 1 && second_half.size() > 1){
analize = true;
cout << "Checking rotamer: " << rt.getBond().getIdx() << " ";
cout << "Starting Atom: " << rt.getBeginAtom().id << " Ending Atom: " << rt.getEndingAtom().id << " ";
cout << "number of atom in first half: " << atoms_first_half.size() << endl;
vector<atom_st> distance_to_compute;
double4* unit_quaternions;
cudaMallocManaged(&unit_quaternions, 360*sizeof(double4));
int deviceId;
cudaGetDevice(&deviceId);
cudaMemPrefetchAsync(unit_quaternions,360*sizeof(double4),deviceId);
double3 tmp_vector = rt.getVector();
            // The computation of the unit quaternions is done in parallel for all
            // the angles, launching the kernel with 360 threads, one for each angle.
compute_unit_quaternions<<<1,360>>>(unit_quaternions,tmp_vector);
cudaDeviceSynchronize();
double max = 0;
double* res;
for(int c = 0; c < 360; c += NUM_OF_BLOCKS ){
vector<vector<atom_st>> rot_first_half;
double3 tmp = rt.getBeginAtom().position;
                // Compute the rotation and store the result
rot_first_half = r.rotate_v5(c , atoms_first_half, tmp, unit_quaternions);
                // Add all the elements of the vector of vectors into a single vector with all the atoms.
                // The atoms are ordered by angle of rotation, and for each rotation the atoms
                // of the second half of the molecule are appended, in order to compute the internal distance.
for(int rotation = 0; rotation < NUM_OF_BLOCKS; rotation++){
//cout << "main line " << __LINE__ << endl;
for(int i = 0; i < atoms_first_half.size(); i++){
distance_to_compute.push_back(rot_first_half[rotation][i]);
}
//cout << "main line " << __LINE__ << endl;
for(atom_st at : atoms_second_half){
distance_to_compute.push_back(at);
}
}
// Compute the internal distance, storing the result in res.
res = distance_v3(distance_to_compute, atoms.size(), NUM_OF_BLOCKS);
// Select the rotation that has the highest internal distance,
// cycling through the results stored in res.
for(int i = 0; i < NUM_OF_BLOCKS;i++){
if(res[i] > max_first_half.distance) {
max_first_half.distance = res[i];
max_first_half.angle = c+i;
max_first_half.rt = rt;
}
}
distance_to_compute.clear();
rot_first_half.clear();
}
printf("Computed distance for the first part,\n");
printf("the max distance compute is %lf with angle %d around rotamer: %d\n", \
max_first_half.distance, max_first_half.angle,max_first_half.rt.getBond().getIdx());
cudaFree(unit_quaternions);
}
else{
analize = false;
printf("Checking rotamer %d ... ", rt.getBond().getIdx());
printf("Too few atoms in the partition, rotamer not analized\n");
}
double total = max_first_half.distance + max_second_half.distance;
if(total > max_dist.distance){
max_dist.distance = total;
max_dist.rt = max_first_half.rt;
max_dist.angle = max_first_half.angle;
}
first_half.clear();
second_half.clear();
atoms_first_half.clear();
atoms_second_half.clear();
        // Add back the edge corresponding to the bond before processing another bond/rotamer.
graph.addEdge(rt.getBeginAtom().id,rt.getEndingAtom().id);
if(analize)
printf("For Rotamer %d, the max distance computed is: %lf,\n with a first angle: %d \n",\
rt.getBond().getIdx(),total,max_first_half.angle);
}
auto stop = std::chrono::high_resolution_clock::now();
auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(stop-start);
cout << "duration time[ms]: " << duration.count() << endl;
printf("The maximum distance computed is %lf\n", max_dist.distance);
printf("Computed with an angle of %d, around the rotamer %d\n",max_dist.angle,max_dist.rt.getBond().getIdx());
return 0;
} |
f3ce3bedb2a9b0087b65e5672a13a43b084fd3ca.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* reduction.cu
*
* Created on: Sep 5, 2016
* Author: uwe
*/
#include "reduction.h"
namespace as {
// Utility class used to avoid linker errors with extern
// unsized shared memory arrays with templated type
template<class T>
struct SharedMemory
{
__device__ inline operator T *()
{
extern __shared__ int __smem[];
return (T *)__smem;
}
__device__ inline operator const T *() const
{
extern __shared__ int __smem[];
return (T *)__smem;
}
};
// specialize for double to avoid unaligned memory
// access compile errors
template<>
struct SharedMemory<double>
{
__device__ inline operator double *()
{
extern __shared__ double __smem_d[];
return (double *)__smem_d;
}
__device__ inline operator const double *() const
{
extern __shared__ double __smem_d[];
return (double *)__smem_d;
}
};
// each thread block reduces the arrays one structure
template <typename T, unsigned int blockSize, bool nIsPow2>
__global__ void
blockReduce(unsigned numAtoms,
T* xPos, T* yPos, T* zPos,
T *d_fx, T *d_fy, T *d_fz, T *d_E,
T *g_odata)
{
T *sdata = SharedMemory<T>();
// perform first level of reduction,
// reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
    unsigned int i = threadIdx.x; // each thread starts at its own atom index within this structure
unsigned int base = blockIdx.x*numAtoms;
T sum_fx = 0;
T sum_fy = 0;
T sum_fz = 0;
T sum_E = 0;
T sum_torque[9] = {0};
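    // The 9 entries accumulate the 3x3 matrix of force-component * coordinate products
    // (row-major: fx*(x,y,z), fy*(x,y,z), fz*(x,y,z)); downstream code presumably
    // assembles the torque from this matrix.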
    // we reduce multiple elements per thread: each block reduces the atoms of one
    // structure, and every thread strides through them in steps of blockSize*2,
    // so larger blocks mean fewer elements per thread
while (i < numAtoms)
{
T fx, fy, fz, x, y, z;
fx = d_fx[base + i];
fy = d_fy[base + i];
fz = d_fz[base + i];
x = xPos[i];
y = yPos[i];
z = zPos[i];
sum_fx += fx;
sum_fy += fy;
sum_fz += fz;
sum_E += d_E[base + i];
sum_torque[0] += x * fx;
sum_torque[1] += y * fx;
sum_torque[2] += z * fx;
sum_torque[3] += x * fy;
sum_torque[4] += y * fy;
sum_torque[5] += z * fy;
sum_torque[6] += x * fz;
sum_torque[7] += y * fz;
sum_torque[8] += z * fz;
// ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays
if (nIsPow2 || i + blockSize < numAtoms) {
fx = d_fx[base + i + blockSize];
fy = d_fy[base + i + blockSize];
fz = d_fz[base + i + blockSize];
x = xPos[i + blockSize];
y = yPos[i + blockSize];
z = zPos[i + blockSize];
sum_fx += fx;
sum_fy += fy;
sum_fz += fz;
sum_E += d_E[base + i + blockSize];
sum_torque[0] += x * fx;
sum_torque[1] += y * fx;
sum_torque[2] += z * fx;
sum_torque[3] += x * fy;
sum_torque[4] += y * fy;
sum_torque[5] += z * fy;
sum_torque[6] += x * fz;
sum_torque[7] += y * fz;
sum_torque[8] += z * fz;
}
i += blockSize*2;
}
// each thread puts its local sum into shared memory
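    // sdata is laid out as 13 consecutive arrays of blockSize elements each:
    // fx, fy, fz, E, followed by the 9 torque-matrix entries.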
sdata[tid + 0 * blockSize] = sum_fx;
sdata[tid + 1 * blockSize] = sum_fy;
sdata[tid + 2 * blockSize] = sum_fz;
sdata[tid + 3 * blockSize] = sum_E;
sdata[tid + 4 * blockSize] = sum_torque[0];
sdata[tid + 5 * blockSize] = sum_torque[1];
sdata[tid + 6 * blockSize] = sum_torque[2];
sdata[tid + 7 * blockSize] = sum_torque[3];
sdata[tid + 8 * blockSize] = sum_torque[4];
sdata[tid + 9 * blockSize] = sum_torque[5];
sdata[tid + 10* blockSize] = sum_torque[6];
sdata[tid + 11* blockSize] = sum_torque[7];
sdata[tid + 12* blockSize] = sum_torque[8];
__syncthreads();
// do reduction in shared mem
if ((blockSize >= 1024) && (tid < 512))
{
sdata[tid + 0 * blockSize] = sum_fx = sum_fx + sdata[tid + 0 * blockSize + 512];
sdata[tid + 1 * blockSize] = sum_fy = sum_fy + sdata[tid + 1 * blockSize + 512];
sdata[tid + 2 * blockSize] = sum_fz = sum_fz + sdata[tid + 2 * blockSize + 512];
sdata[tid + 3 * blockSize] = sum_E = sum_E + sdata[tid + 3 * blockSize + 512];
sdata[tid + 4 * blockSize] = sum_torque[0] = sum_torque[0] + sdata[tid + 4 * blockSize + 512];
sdata[tid + 5 * blockSize] = sum_torque[1] = sum_torque[1] + sdata[tid + 5 * blockSize + 512];
sdata[tid + 6 * blockSize] = sum_torque[2] = sum_torque[2] + sdata[tid + 6 * blockSize + 512];
sdata[tid + 7 * blockSize] = sum_torque[3] = sum_torque[3] + sdata[tid + 7 * blockSize + 512];
sdata[tid + 8 * blockSize] = sum_torque[4] = sum_torque[4] + sdata[tid + 8 * blockSize + 512];
sdata[tid + 9 * blockSize] = sum_torque[5] = sum_torque[5] + sdata[tid + 9 * blockSize + 512];
sdata[tid + 10* blockSize] = sum_torque[6] = sum_torque[6] + sdata[tid + 10* blockSize + 512];
sdata[tid + 11* blockSize] = sum_torque[7] = sum_torque[7] + sdata[tid + 11* blockSize + 512];
sdata[tid + 12* blockSize] = sum_torque[8] = sum_torque[8] + sdata[tid + 12* blockSize + 512];
}
__syncthreads();
// do reduction in shared mem
if ((blockSize >= 512) && (tid < 256))
{
sdata[tid + 0 * blockSize] = sum_fx = sum_fx + sdata[tid + 0 * blockSize + 256];
sdata[tid + 1 * blockSize] = sum_fy = sum_fy + sdata[tid + 1 * blockSize + 256];
sdata[tid + 2 * blockSize] = sum_fz = sum_fz + sdata[tid + 2 * blockSize + 256];
sdata[tid + 3 * blockSize] = sum_E = sum_E + sdata[tid + 3 * blockSize + 256];
sdata[tid + 4 * blockSize] = sum_torque[0] = sum_torque[0] + sdata[tid + 4 * blockSize + 256];
sdata[tid + 5 * blockSize] = sum_torque[1] = sum_torque[1] + sdata[tid + 5 * blockSize + 256];
sdata[tid + 6 * blockSize] = sum_torque[2] = sum_torque[2] + sdata[tid + 6 * blockSize + 256];
sdata[tid + 7 * blockSize] = sum_torque[3] = sum_torque[3] + sdata[tid + 7 * blockSize + 256];
sdata[tid + 8 * blockSize] = sum_torque[4] = sum_torque[4] + sdata[tid + 8 * blockSize + 256];
sdata[tid + 9 * blockSize] = sum_torque[5] = sum_torque[5] + sdata[tid + 9 * blockSize + 256];
sdata[tid + 10* blockSize] = sum_torque[6] = sum_torque[6] + sdata[tid + 10* blockSize + 256];
sdata[tid + 11* blockSize] = sum_torque[7] = sum_torque[7] + sdata[tid + 11* blockSize + 256];
sdata[tid + 12* blockSize] = sum_torque[8] = sum_torque[8] + sdata[tid + 12* blockSize + 256];
}
__syncthreads();
if ((blockSize >= 256) &&(tid < 128))
{
sdata[tid + 0 * blockSize] = sum_fx = sum_fx + sdata[tid + 0 * blockSize + 128];
sdata[tid + 1 * blockSize] = sum_fy = sum_fy + sdata[tid + 1 * blockSize + 128];
sdata[tid + 2 * blockSize] = sum_fz = sum_fz + sdata[tid + 2 * blockSize + 128];
sdata[tid + 3 * blockSize] = sum_E = sum_E + sdata[tid + 3 * blockSize + 128];
sdata[tid + 4 * blockSize] = sum_torque[0] = sum_torque[0] + sdata[tid + 4 * blockSize + 128];
sdata[tid + 5 * blockSize] = sum_torque[1] = sum_torque[1] + sdata[tid + 5 * blockSize + 128];
sdata[tid + 6 * blockSize] = sum_torque[2] = sum_torque[2] + sdata[tid + 6 * blockSize + 128];
sdata[tid + 7 * blockSize] = sum_torque[3] = sum_torque[3] + sdata[tid + 7 * blockSize + 128];
sdata[tid + 8 * blockSize] = sum_torque[4] = sum_torque[4] + sdata[tid + 8 * blockSize + 128];
sdata[tid + 9 * blockSize] = sum_torque[5] = sum_torque[5] + sdata[tid + 9 * blockSize + 128];
sdata[tid + 10* blockSize] = sum_torque[6] = sum_torque[6] + sdata[tid + 10* blockSize + 128];
sdata[tid + 11* blockSize] = sum_torque[7] = sum_torque[7] + sdata[tid + 11* blockSize + 128];
sdata[tid + 12* blockSize] = sum_torque[8] = sum_torque[8] + sdata[tid + 12* blockSize + 128];
}
__syncthreads();
if ((blockSize >= 128) && (tid < 64))
{
sdata[tid + 0 * blockSize] = sum_fx = sum_fx + sdata[tid + 0 * blockSize + 64];
sdata[tid + 1 * blockSize] = sum_fy = sum_fy + sdata[tid + 1 * blockSize + 64];
sdata[tid + 2 * blockSize] = sum_fz = sum_fz + sdata[tid + 2 * blockSize + 64];
sdata[tid + 3 * blockSize] = sum_E = sum_E + sdata[tid + 3 * blockSize + 64];
sdata[tid + 4 * blockSize] = sum_torque[0] = sum_torque[0] + sdata[tid + 4 * blockSize + 64];
sdata[tid + 5 * blockSize] = sum_torque[1] = sum_torque[1] + sdata[tid + 5 * blockSize + 64];
sdata[tid + 6 * blockSize] = sum_torque[2] = sum_torque[2] + sdata[tid + 6 * blockSize + 64];
sdata[tid + 7 * blockSize] = sum_torque[3] = sum_torque[3] + sdata[tid + 7 * blockSize + 64];
sdata[tid + 8 * blockSize] = sum_torque[4] = sum_torque[4] + sdata[tid + 8 * blockSize + 64];
sdata[tid + 9 * blockSize] = sum_torque[5] = sum_torque[5] + sdata[tid + 9 * blockSize + 64];
sdata[tid + 10* blockSize] = sum_torque[6] = sum_torque[6] + sdata[tid + 10* blockSize + 64];
sdata[tid + 11* blockSize] = sum_torque[7] = sum_torque[7] + sdata[tid + 11* blockSize + 64];
sdata[tid + 12* blockSize] = sum_torque[8] = sum_torque[8] + sdata[tid + 12* blockSize + 64];
}
__syncthreads();
#if (__CUDA_ARCH__ >= 300 )
if ( tid < 32 )
{
// Fetch final intermediate sum from 2nd warp
if (blockSize >= 64) {
sum_fx += sdata[tid + 0 * blockSize + 32];
sum_fy += sdata[tid + 1 * blockSize + 32];
sum_fz += sdata[tid + 2 * blockSize + 32];
sum_E += sdata[tid + 3 * blockSize + 32];
sum_torque[0] += sdata[tid + 4 * blockSize + 32];
sum_torque[1] += sdata[tid + 5 * blockSize + 32];
sum_torque[2] += sdata[tid + 6 * blockSize + 32];
sum_torque[3] += sdata[tid + 7 * blockSize + 32];
sum_torque[4] += sdata[tid + 8 * blockSize + 32];
sum_torque[5] += sdata[tid + 9 * blockSize + 32];
sum_torque[6] += sdata[tid + 10* blockSize + 32];
sum_torque[7] += sdata[tid + 11* blockSize + 32];
sum_torque[8] += sdata[tid + 12* blockSize + 32];
}
// Reduce final warp using shuffle
for (int offset = warpSize/2; offset > 0; offset /= 2)
{
sum_fx += __shfl_down(sum_fx , offset);
sum_fy += __shfl_down(sum_fy , offset);
sum_fz += __shfl_down(sum_fz , offset);
sum_E += __shfl_down(sum_E , offset);
sum_torque[0] += __shfl_down(sum_torque[0], offset);
sum_torque[1] += __shfl_down(sum_torque[1], offset);
sum_torque[2] += __shfl_down(sum_torque[2], offset);
sum_torque[3] += __shfl_down(sum_torque[3], offset);
sum_torque[4] += __shfl_down(sum_torque[4], offset);
sum_torque[5] += __shfl_down(sum_torque[5], offset);
sum_torque[6] += __shfl_down(sum_torque[6], offset);
sum_torque[7] += __shfl_down(sum_torque[7], offset);
sum_torque[8] += __shfl_down(sum_torque[8], offset);
}
}
#else
// fully unroll reduction within a single warp
if ((blockSize >= 64) && (tid < 32))
{
sdata[tid + 0 * blockSize] = sum_fx = sum_fx + sdata[tid + 0 * blockSize + 32];
sdata[tid + 1 * blockSize] = sum_fy = sum_fy + sdata[tid + 1 * blockSize + 32];
sdata[tid + 2 * blockSize] = sum_fz = sum_fz + sdata[tid + 2 * blockSize + 32];
sdata[tid + 3 * blockSize] = sum_E = sum_E + sdata[tid + 3 * blockSize + 32];
sdata[tid + 4 * blockSize] = sum_torque[0] = sum_torque[0] + sdata[tid + 4 * blockSize + 32];
sdata[tid + 5 * blockSize] = sum_torque[1] = sum_torque[1] + sdata[tid + 5 * blockSize + 32];
sdata[tid + 6 * blockSize] = sum_torque[2] = sum_torque[2] + sdata[tid + 6 * blockSize + 32];
sdata[tid + 7 * blockSize] = sum_torque[3] = sum_torque[3] + sdata[tid + 7 * blockSize + 32];
sdata[tid + 8 * blockSize] = sum_torque[4] = sum_torque[4] + sdata[tid + 8 * blockSize + 32];
sdata[tid + 9 * blockSize] = sum_torque[5] = sum_torque[5] + sdata[tid + 9 * blockSize + 32];
sdata[tid + 10* blockSize] = sum_torque[6] = sum_torque[6] + sdata[tid + 10* blockSize + 32];
sdata[tid + 11* blockSize] = sum_torque[7] = sum_torque[7] + sdata[tid + 11* blockSize + 32];
sdata[tid + 12* blockSize] = sum_torque[8] = sum_torque[8] + sdata[tid + 12* blockSize + 32];
}
__syncthreads();
if ((blockSize >= 32) && (tid < 16))
{
sdata[tid + 0 * blockSize] = sum_fx = sum_fx + sdata[tid + 0 * blockSize + 16];
sdata[tid + 1 * blockSize] = sum_fy = sum_fy + sdata[tid + 1 * blockSize + 16];
sdata[tid + 2 * blockSize] = sum_fz = sum_fz + sdata[tid + 2 * blockSize + 16];
sdata[tid + 3 * blockSize] = sum_E = sum_E + sdata[tid + 3 * blockSize + 16];
sdata[tid + 4 * blockSize] = sum_torque[0] = sum_torque[0] + sdata[tid + 4 * blockSize + 16];
sdata[tid + 5 * blockSize] = sum_torque[1] = sum_torque[1] + sdata[tid + 5 * blockSize + 16];
sdata[tid + 6 * blockSize] = sum_torque[2] = sum_torque[2] + sdata[tid + 6 * blockSize + 16];
sdata[tid + 7 * blockSize] = sum_torque[3] = sum_torque[3] + sdata[tid + 7 * blockSize + 16];
sdata[tid + 8 * blockSize] = sum_torque[4] = sum_torque[4] + sdata[tid + 8 * blockSize + 16];
sdata[tid + 9 * blockSize] = sum_torque[5] = sum_torque[5] + sdata[tid + 9 * blockSize + 16];
sdata[tid + 10* blockSize] = sum_torque[6] = sum_torque[6] + sdata[tid + 10* blockSize + 16];
sdata[tid + 11* blockSize] = sum_torque[7] = sum_torque[7] + sdata[tid + 11* blockSize + 16];
sdata[tid + 12* blockSize] = sum_torque[8] = sum_torque[8] + sdata[tid + 12* blockSize + 16];
}
__syncthreads();
if ((blockSize >= 16) && (tid < 8))
{
sdata[tid + 0 * blockSize] = sum_fx = sum_fx + sdata[tid + 0 * blockSize + 8];
sdata[tid + 1 * blockSize] = sum_fy = sum_fy + sdata[tid + 1 * blockSize + 8];
sdata[tid + 2 * blockSize] = sum_fz = sum_fz + sdata[tid + 2 * blockSize + 8];
sdata[tid + 3 * blockSize] = sum_E = sum_E + sdata[tid + 3 * blockSize + 8];
sdata[tid + 4 * blockSize] = sum_torque[0] = sum_torque[0] + sdata[tid + 4 * blockSize + 8];
sdata[tid + 5 * blockSize] = sum_torque[1] = sum_torque[1] + sdata[tid + 5 * blockSize + 8];
sdata[tid + 6 * blockSize] = sum_torque[2] = sum_torque[2] + sdata[tid + 6 * blockSize + 8];
sdata[tid + 7 * blockSize] = sum_torque[3] = sum_torque[3] + sdata[tid + 7 * blockSize + 8];
sdata[tid + 8 * blockSize] = sum_torque[4] = sum_torque[4] + sdata[tid + 8 * blockSize + 8];
sdata[tid + 9 * blockSize] = sum_torque[5] = sum_torque[5] + sdata[tid + 9 * blockSize + 8];
sdata[tid + 10* blockSize] = sum_torque[6] = sum_torque[6] + sdata[tid + 10* blockSize + 8];
sdata[tid + 11* blockSize] = sum_torque[7] = sum_torque[7] + sdata[tid + 11* blockSize + 8];
sdata[tid + 12* blockSize] = sum_torque[8] = sum_torque[8] + sdata[tid + 12* blockSize + 8];
}
__syncthreads();
if ((blockSize >= 8) && (tid < 4))
{
sdata[tid + 0 * blockSize] = sum_fx = sum_fx + sdata[tid + 0 * blockSize + 4];
sdata[tid + 1 * blockSize] = sum_fy = sum_fy + sdata[tid + 1 * blockSize + 4];
sdata[tid + 2 * blockSize] = sum_fz = sum_fz + sdata[tid + 2 * blockSize + 4];
sdata[tid + 3 * blockSize] = sum_E = sum_E + sdata[tid + 3 * blockSize + 4];
sdata[tid + 4 * blockSize] = sum_torque[0] = sum_torque[0] + sdata[tid + 4 * blockSize + 4];
sdata[tid + 5 * blockSize] = sum_torque[1] = sum_torque[1] + sdata[tid + 5 * blockSize + 4];
sdata[tid + 6 * blockSize] = sum_torque[2] = sum_torque[2] + sdata[tid + 6 * blockSize + 4];
sdata[tid + 7 * blockSize] = sum_torque[3] = sum_torque[3] + sdata[tid + 7 * blockSize + 4];
sdata[tid + 8 * blockSize] = sum_torque[4] = sum_torque[4] + sdata[tid + 8 * blockSize + 4];
sdata[tid + 9 * blockSize] = sum_torque[5] = sum_torque[5] + sdata[tid + 9 * blockSize + 4];
sdata[tid + 10* blockSize] = sum_torque[6] = sum_torque[6] + sdata[tid + 10* blockSize + 4];
sdata[tid + 11* blockSize] = sum_torque[7] = sum_torque[7] + sdata[tid + 11* blockSize + 4];
sdata[tid + 12* blockSize] = sum_torque[8] = sum_torque[8] + sdata[tid + 12* blockSize + 4];
}
__syncthreads();
if ((blockSize >= 4) && (tid < 2))
{
sdata[tid + 0 * blockSize] = sum_fx = sum_fx + sdata[tid + 0 * blockSize + 2];
sdata[tid + 1 * blockSize] = sum_fy = sum_fy + sdata[tid + 1 * blockSize + 2];
sdata[tid + 2 * blockSize] = sum_fz = sum_fz + sdata[tid + 2 * blockSize + 2];
sdata[tid + 3 * blockSize] = sum_E = sum_E + sdata[tid + 3 * blockSize + 2];
sdata[tid + 4 * blockSize] = sum_torque[0] = sum_torque[0] + sdata[tid + 4 * blockSize + 2];
sdata[tid + 5 * blockSize] = sum_torque[1] = sum_torque[1] + sdata[tid + 5 * blockSize + 2];
sdata[tid + 6 * blockSize] = sum_torque[2] = sum_torque[2] + sdata[tid + 6 * blockSize + 2];
sdata[tid + 7 * blockSize] = sum_torque[3] = sum_torque[3] + sdata[tid + 7 * blockSize + 2];
sdata[tid + 8 * blockSize] = sum_torque[4] = sum_torque[4] + sdata[tid + 8 * blockSize + 2];
sdata[tid + 9 * blockSize] = sum_torque[5] = sum_torque[5] + sdata[tid + 9 * blockSize + 2];
sdata[tid + 10* blockSize] = sum_torque[6] = sum_torque[6] + sdata[tid + 10* blockSize + 2];
sdata[tid + 11* blockSize] = sum_torque[7] = sum_torque[7] + sdata[tid + 11* blockSize + 2];
sdata[tid + 12* blockSize] = sum_torque[8] = sum_torque[8] + sdata[tid + 12* blockSize + 2];
}
__syncthreads();
if ((blockSize >= 2) && ( tid < 1))
{
sdata[tid + 0 * blockSize] = sum_fx = sum_fx + sdata[tid + 0 * blockSize + 1];
sdata[tid + 1 * blockSize] = sum_fy = sum_fy + sdata[tid + 1 * blockSize + 1];
sdata[tid + 2 * blockSize] = sum_fz = sum_fz + sdata[tid + 2 * blockSize + 1];
sdata[tid + 3 * blockSize] = sum_E = sum_E + sdata[tid + 3 * blockSize + 1];
sdata[tid + 4 * blockSize] = sum_torque[0] = sum_torque[0] + sdata[tid + 4 * blockSize + 1];
sdata[tid + 5 * blockSize] = sum_torque[1] = sum_torque[1] + sdata[tid + 5 * blockSize + 1];
sdata[tid + 6 * blockSize] = sum_torque[2] = sum_torque[2] + sdata[tid + 6 * blockSize + 1];
sdata[tid + 7 * blockSize] = sum_torque[3] = sum_torque[3] + sdata[tid + 7 * blockSize + 1];
sdata[tid + 8 * blockSize] = sum_torque[4] = sum_torque[4] + sdata[tid + 8 * blockSize + 1];
sdata[tid + 9 * blockSize] = sum_torque[5] = sum_torque[5] + sdata[tid + 9 * blockSize + 1];
sdata[tid + 10* blockSize] = sum_torque[6] = sum_torque[6] + sdata[tid + 10* blockSize + 1];
sdata[tid + 11* blockSize] = sum_torque[7] = sum_torque[7] + sdata[tid + 11* blockSize + 1];
sdata[tid + 12* blockSize] = sum_torque[8] = sum_torque[8] + sdata[tid + 12* blockSize + 1];
}
__syncthreads();
#endif
// write result for this block to global mem
if (tid == 0) {
g_odata[0 + blockIdx.x*13] = sum_fx;
g_odata[1 + blockIdx.x*13] = sum_fy;
g_odata[2 + blockIdx.x*13] = sum_fz;
g_odata[3 + blockIdx.x*13] = sum_E;
g_odata[4 + blockIdx.x*13] = sum_torque[0];
g_odata[5 + blockIdx.x*13] = sum_torque[1];
g_odata[6 + blockIdx.x*13] = sum_torque[2];
g_odata[7 + blockIdx.x*13] = sum_torque[3];
g_odata[8 + blockIdx.x*13] = sum_torque[4];
g_odata[9 + blockIdx.x*13] = sum_torque[5];
g_odata[10 + blockIdx.x*13] = sum_torque[6];
g_odata[11 + blockIdx.x*13] = sum_torque[7];
g_odata[12 + blockIdx.x*13] = sum_torque[8];
}
}
template <class T>
void d_reduce(
const unsigned& threads,
const unsigned& blocks,
const unsigned& numAtoms,
T* xPos, T* yPos, T* zPos,
T *d_fx, T *d_fy, T *d_fz, T *d_E,
T *g_odata,
const hipStream_t& stream)
{
dim3 dimBlock(threads, 1, 1);
dim3 dimGrid(blocks, 1, 1);
// when there is only one warp per block, we need to allocate two warps
// worth of shared memory so that we don't index shared memory out of bounds
const int smemSize = (threads <= 32) ? 2 * 13 * threads * sizeof(T) : 13 * threads * sizeof(T);
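	// 13 partial results per thread: 3 force components, the energy, and the 9 torque-matrix entries.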
// choose which of the optimized versions of reduction to launch
if (isPow2(numAtoms))
{
switch (threads)
{
case 1024:
hipLaunchKernelGGL(( blockReduce<T, 1024,true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream , numAtoms, xPos, yPos, zPos, d_fx, d_fy, d_fz, d_E, g_odata);
break;
case 512:
hipLaunchKernelGGL(( blockReduce<T, 512, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream , numAtoms, xPos, yPos, zPos, d_fx, d_fy, d_fz, d_E, g_odata);
break;
case 256:
hipLaunchKernelGGL(( blockReduce<T, 256, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream , numAtoms, xPos, yPos, zPos, d_fx, d_fy, d_fz, d_E, g_odata);
break;
case 128:
hipLaunchKernelGGL(( blockReduce<T, 128, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream , numAtoms, xPos, yPos, zPos, d_fx, d_fy, d_fz, d_E, g_odata);
break;
case 64:
hipLaunchKernelGGL(( blockReduce<T, 64, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream , numAtoms, xPos, yPos, zPos, d_fx, d_fy, d_fz, d_E, g_odata);
break;
case 32:
hipLaunchKernelGGL(( blockReduce<T, 32, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream , numAtoms, xPos, yPos, zPos, d_fx, d_fy, d_fz, d_E, g_odata);
break;
case 16:
hipLaunchKernelGGL(( blockReduce<T, 16, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream , numAtoms, xPos, yPos, zPos, d_fx, d_fy, d_fz, d_E, g_odata);
break;
case 8:
hipLaunchKernelGGL(( blockReduce<T, 8, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream , numAtoms, xPos, yPos, zPos, d_fx, d_fy, d_fz, d_E, g_odata);
break;
case 4:
hipLaunchKernelGGL(( blockReduce<T, 4, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream , numAtoms, xPos, yPos, zPos, d_fx, d_fy, d_fz, d_E, g_odata);
break;
case 2:
hipLaunchKernelGGL(( blockReduce<T, 2, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream , numAtoms, xPos, yPos, zPos, d_fx, d_fy, d_fz, d_E, g_odata);
break;
case 1:
hipLaunchKernelGGL(( blockReduce<T, 1, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream , numAtoms, xPos, yPos, zPos, d_fx, d_fy, d_fz, d_E, g_odata);
break;
}
}
else
{
switch (threads)
{
case 1024:
hipLaunchKernelGGL(( blockReduce<T, 1024,false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream , numAtoms, xPos, yPos, zPos, d_fx, d_fy, d_fz, d_E, g_odata);
break;
case 512:
hipLaunchKernelGGL(( blockReduce<T, 512, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream , numAtoms, xPos, yPos, zPos, d_fx, d_fy, d_fz, d_E, g_odata);
break;
case 256:
hipLaunchKernelGGL(( blockReduce<T, 256, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream , numAtoms, xPos, yPos, zPos, d_fx, d_fy, d_fz, d_E, g_odata);
break;
case 128:
hipLaunchKernelGGL(( blockReduce<T, 128, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream , numAtoms, xPos, yPos, zPos, d_fx, d_fy, d_fz, d_E, g_odata);
break;
case 64:
hipLaunchKernelGGL(( blockReduce<T, 64, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream , numAtoms, xPos, yPos, zPos, d_fx, d_fy, d_fz, d_E, g_odata);
break;
case 32:
hipLaunchKernelGGL(( blockReduce<T, 32, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream , numAtoms, xPos, yPos, zPos, d_fx, d_fy, d_fz, d_E, g_odata);
break;
case 16:
hipLaunchKernelGGL(( blockReduce<T, 16, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream , numAtoms, xPos, yPos, zPos, d_fx, d_fy, d_fz, d_E, g_odata);
break;
case 8:
hipLaunchKernelGGL(( blockReduce<T, 8, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream , numAtoms, xPos, yPos, zPos, d_fx, d_fy, d_fz, d_E, g_odata);
break;
case 4:
hipLaunchKernelGGL(( blockReduce<T, 4, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream , numAtoms, xPos, yPos, zPos, d_fx, d_fy, d_fz, d_E, g_odata);
break;
case 2:
hipLaunchKernelGGL(( blockReduce<T, 2, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream , numAtoms, xPos, yPos, zPos, d_fx, d_fy, d_fz, d_E, g_odata);
break;
case 1:
hipLaunchKernelGGL(( blockReduce<T, 1, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream , numAtoms, xPos, yPos, zPos, d_fx, d_fy, d_fz, d_E, g_odata);
break;
}
}
}
template
void d_reduce<float>(
const unsigned& threads,
const unsigned& blocks,
const unsigned& numAtoms,
float* xPos, float* yPos, float* zPos,
float *d_fx, float *d_fy, float *d_fz, float *d_E,
float *g_odata,
const hipStream_t& stream);
template
void d_reduce<double>(
const unsigned& threads,
const unsigned& blocks,
const unsigned& numAtoms,
double* xPos, double* yPos, double* zPos,
double *d_fx, double *d_fy, double *d_fz, double *d_E,
double *g_odata,
const hipStream_t& stream);
} // namespace as
| f3ce3bedb2a9b0087b65e5672a13a43b084fd3ca.cu | /*
* reduction.cu
*
* Created on: Sep 5, 2016
* Author: uwe
*/
#include "reduction.h"
namespace as {
// Utility class used to avoid linker errors with extern
// unsized shared memory arrays with templated type
template<class T>
struct SharedMemory
{
__device__ inline operator T *()
{
extern __shared__ int __smem[];
return (T *)__smem;
}
__device__ inline operator const T *() const
{
extern __shared__ int __smem[];
return (T *)__smem;
}
};
// specialize for double to avoid unaligned memory
// access compile errors
template<>
struct SharedMemory<double>
{
__device__ inline operator double *()
{
extern __shared__ double __smem_d[];
return (double *)__smem_d;
}
__device__ inline operator const double *() const
{
extern __shared__ double __smem_d[];
return (double *)__smem_d;
}
};
// each thread block reduces the arrays one structure
template <typename T, unsigned int blockSize, bool nIsPow2>
__global__ void
blockReduce(unsigned numAtoms,
T* xPos, T* yPos, T* zPos,
T *d_fx, T *d_fy, T *d_fz, T *d_E,
T *g_odata)
{
T *sdata = SharedMemory<T>();
// perform first level of reduction,
// reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
    unsigned int i = threadIdx.x; // each thread starts at its own atom index within this structure
unsigned int base = blockIdx.x*numAtoms;
T sum_fx = 0;
T sum_fy = 0;
T sum_fz = 0;
T sum_E = 0;
T sum_torque[9] = {0};
    // we reduce multiple elements per thread: each block reduces the atoms of one
    // structure, and every thread strides through them in steps of blockSize*2,
    // so larger blocks mean fewer elements per thread
while (i < numAtoms)
{
T fx, fy, fz, x, y, z;
fx = d_fx[base + i];
fy = d_fy[base + i];
fz = d_fz[base + i];
x = xPos[i];
y = yPos[i];
z = zPos[i];
sum_fx += fx;
sum_fy += fy;
sum_fz += fz;
sum_E += d_E[base + i];
sum_torque[0] += x * fx;
sum_torque[1] += y * fx;
sum_torque[2] += z * fx;
sum_torque[3] += x * fy;
sum_torque[4] += y * fy;
sum_torque[5] += z * fy;
sum_torque[6] += x * fz;
sum_torque[7] += y * fz;
sum_torque[8] += z * fz;
// ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays
if (nIsPow2 || i + blockSize < numAtoms) {
fx = d_fx[base + i + blockSize];
fy = d_fy[base + i + blockSize];
fz = d_fz[base + i + blockSize];
x = xPos[i + blockSize];
y = yPos[i + blockSize];
z = zPos[i + blockSize];
sum_fx += fx;
sum_fy += fy;
sum_fz += fz;
sum_E += d_E[base + i + blockSize];
sum_torque[0] += x * fx;
sum_torque[1] += y * fx;
sum_torque[2] += z * fx;
sum_torque[3] += x * fy;
sum_torque[4] += y * fy;
sum_torque[5] += z * fy;
sum_torque[6] += x * fz;
sum_torque[7] += y * fz;
sum_torque[8] += z * fz;
}
i += blockSize*2;
}
// each thread puts its local sum into shared memory
sdata[tid + 0 * blockSize] = sum_fx;
sdata[tid + 1 * blockSize] = sum_fy;
sdata[tid + 2 * blockSize] = sum_fz;
sdata[tid + 3 * blockSize] = sum_E;
sdata[tid + 4 * blockSize] = sum_torque[0];
sdata[tid + 5 * blockSize] = sum_torque[1];
sdata[tid + 6 * blockSize] = sum_torque[2];
sdata[tid + 7 * blockSize] = sum_torque[3];
sdata[tid + 8 * blockSize] = sum_torque[4];
sdata[tid + 9 * blockSize] = sum_torque[5];
sdata[tid + 10* blockSize] = sum_torque[6];
sdata[tid + 11* blockSize] = sum_torque[7];
sdata[tid + 12* blockSize] = sum_torque[8];
__syncthreads();
// do reduction in shared mem
if ((blockSize >= 1024) && (tid < 512))
{
sdata[tid + 0 * blockSize] = sum_fx = sum_fx + sdata[tid + 0 * blockSize + 512];
sdata[tid + 1 * blockSize] = sum_fy = sum_fy + sdata[tid + 1 * blockSize + 512];
sdata[tid + 2 * blockSize] = sum_fz = sum_fz + sdata[tid + 2 * blockSize + 512];
sdata[tid + 3 * blockSize] = sum_E = sum_E + sdata[tid + 3 * blockSize + 512];
sdata[tid + 4 * blockSize] = sum_torque[0] = sum_torque[0] + sdata[tid + 4 * blockSize + 512];
sdata[tid + 5 * blockSize] = sum_torque[1] = sum_torque[1] + sdata[tid + 5 * blockSize + 512];
sdata[tid + 6 * blockSize] = sum_torque[2] = sum_torque[2] + sdata[tid + 6 * blockSize + 512];
sdata[tid + 7 * blockSize] = sum_torque[3] = sum_torque[3] + sdata[tid + 7 * blockSize + 512];
sdata[tid + 8 * blockSize] = sum_torque[4] = sum_torque[4] + sdata[tid + 8 * blockSize + 512];
sdata[tid + 9 * blockSize] = sum_torque[5] = sum_torque[5] + sdata[tid + 9 * blockSize + 512];
sdata[tid + 10* blockSize] = sum_torque[6] = sum_torque[6] + sdata[tid + 10* blockSize + 512];
sdata[tid + 11* blockSize] = sum_torque[7] = sum_torque[7] + sdata[tid + 11* blockSize + 512];
sdata[tid + 12* blockSize] = sum_torque[8] = sum_torque[8] + sdata[tid + 12* blockSize + 512];
}
__syncthreads();
// do reduction in shared mem
if ((blockSize >= 512) && (tid < 256))
{
sdata[tid + 0 * blockSize] = sum_fx = sum_fx + sdata[tid + 0 * blockSize + 256];
sdata[tid + 1 * blockSize] = sum_fy = sum_fy + sdata[tid + 1 * blockSize + 256];
sdata[tid + 2 * blockSize] = sum_fz = sum_fz + sdata[tid + 2 * blockSize + 256];
sdata[tid + 3 * blockSize] = sum_E = sum_E + sdata[tid + 3 * blockSize + 256];
sdata[tid + 4 * blockSize] = sum_torque[0] = sum_torque[0] + sdata[tid + 4 * blockSize + 256];
sdata[tid + 5 * blockSize] = sum_torque[1] = sum_torque[1] + sdata[tid + 5 * blockSize + 256];
sdata[tid + 6 * blockSize] = sum_torque[2] = sum_torque[2] + sdata[tid + 6 * blockSize + 256];
sdata[tid + 7 * blockSize] = sum_torque[3] = sum_torque[3] + sdata[tid + 7 * blockSize + 256];
sdata[tid + 8 * blockSize] = sum_torque[4] = sum_torque[4] + sdata[tid + 8 * blockSize + 256];
sdata[tid + 9 * blockSize] = sum_torque[5] = sum_torque[5] + sdata[tid + 9 * blockSize + 256];
sdata[tid + 10* blockSize] = sum_torque[6] = sum_torque[6] + sdata[tid + 10* blockSize + 256];
sdata[tid + 11* blockSize] = sum_torque[7] = sum_torque[7] + sdata[tid + 11* blockSize + 256];
sdata[tid + 12* blockSize] = sum_torque[8] = sum_torque[8] + sdata[tid + 12* blockSize + 256];
}
__syncthreads();
if ((blockSize >= 256) &&(tid < 128))
{
sdata[tid + 0 * blockSize] = sum_fx = sum_fx + sdata[tid + 0 * blockSize + 128];
sdata[tid + 1 * blockSize] = sum_fy = sum_fy + sdata[tid + 1 * blockSize + 128];
sdata[tid + 2 * blockSize] = sum_fz = sum_fz + sdata[tid + 2 * blockSize + 128];
sdata[tid + 3 * blockSize] = sum_E = sum_E + sdata[tid + 3 * blockSize + 128];
sdata[tid + 4 * blockSize] = sum_torque[0] = sum_torque[0] + sdata[tid + 4 * blockSize + 128];
sdata[tid + 5 * blockSize] = sum_torque[1] = sum_torque[1] + sdata[tid + 5 * blockSize + 128];
sdata[tid + 6 * blockSize] = sum_torque[2] = sum_torque[2] + sdata[tid + 6 * blockSize + 128];
sdata[tid + 7 * blockSize] = sum_torque[3] = sum_torque[3] + sdata[tid + 7 * blockSize + 128];
sdata[tid + 8 * blockSize] = sum_torque[4] = sum_torque[4] + sdata[tid + 8 * blockSize + 128];
sdata[tid + 9 * blockSize] = sum_torque[5] = sum_torque[5] + sdata[tid + 9 * blockSize + 128];
sdata[tid + 10* blockSize] = sum_torque[6] = sum_torque[6] + sdata[tid + 10* blockSize + 128];
sdata[tid + 11* blockSize] = sum_torque[7] = sum_torque[7] + sdata[tid + 11* blockSize + 128];
sdata[tid + 12* blockSize] = sum_torque[8] = sum_torque[8] + sdata[tid + 12* blockSize + 128];
}
__syncthreads();
if ((blockSize >= 128) && (tid < 64))
{
sdata[tid + 0 * blockSize] = sum_fx = sum_fx + sdata[tid + 0 * blockSize + 64];
sdata[tid + 1 * blockSize] = sum_fy = sum_fy + sdata[tid + 1 * blockSize + 64];
sdata[tid + 2 * blockSize] = sum_fz = sum_fz + sdata[tid + 2 * blockSize + 64];
sdata[tid + 3 * blockSize] = sum_E = sum_E + sdata[tid + 3 * blockSize + 64];
sdata[tid + 4 * blockSize] = sum_torque[0] = sum_torque[0] + sdata[tid + 4 * blockSize + 64];
sdata[tid + 5 * blockSize] = sum_torque[1] = sum_torque[1] + sdata[tid + 5 * blockSize + 64];
sdata[tid + 6 * blockSize] = sum_torque[2] = sum_torque[2] + sdata[tid + 6 * blockSize + 64];
sdata[tid + 7 * blockSize] = sum_torque[3] = sum_torque[3] + sdata[tid + 7 * blockSize + 64];
sdata[tid + 8 * blockSize] = sum_torque[4] = sum_torque[4] + sdata[tid + 8 * blockSize + 64];
sdata[tid + 9 * blockSize] = sum_torque[5] = sum_torque[5] + sdata[tid + 9 * blockSize + 64];
sdata[tid + 10* blockSize] = sum_torque[6] = sum_torque[6] + sdata[tid + 10* blockSize + 64];
sdata[tid + 11* blockSize] = sum_torque[7] = sum_torque[7] + sdata[tid + 11* blockSize + 64];
sdata[tid + 12* blockSize] = sum_torque[8] = sum_torque[8] + sdata[tid + 12* blockSize + 64];
}
__syncthreads();
#if (__CUDA_ARCH__ >= 300 )
if ( tid < 32 )
{
// Fetch final intermediate sum from 2nd warp
if (blockSize >= 64) {
sum_fx += sdata[tid + 0 * blockSize + 32];
sum_fy += sdata[tid + 1 * blockSize + 32];
sum_fz += sdata[tid + 2 * blockSize + 32];
sum_E += sdata[tid + 3 * blockSize + 32];
sum_torque[0] += sdata[tid + 4 * blockSize + 32];
sum_torque[1] += sdata[tid + 5 * blockSize + 32];
sum_torque[2] += sdata[tid + 6 * blockSize + 32];
sum_torque[3] += sdata[tid + 7 * blockSize + 32];
sum_torque[4] += sdata[tid + 8 * blockSize + 32];
sum_torque[5] += sdata[tid + 9 * blockSize + 32];
sum_torque[6] += sdata[tid + 10* blockSize + 32];
sum_torque[7] += sdata[tid + 11* blockSize + 32];
sum_torque[8] += sdata[tid + 12* blockSize + 32];
}
// Reduce final warp using shuffle
for (int offset = warpSize/2; offset > 0; offset /= 2)
{
sum_fx += __shfl_down(sum_fx , offset);
sum_fy += __shfl_down(sum_fy , offset);
sum_fz += __shfl_down(sum_fz , offset);
sum_E += __shfl_down(sum_E , offset);
sum_torque[0] += __shfl_down(sum_torque[0], offset);
sum_torque[1] += __shfl_down(sum_torque[1], offset);
sum_torque[2] += __shfl_down(sum_torque[2], offset);
sum_torque[3] += __shfl_down(sum_torque[3], offset);
sum_torque[4] += __shfl_down(sum_torque[4], offset);
sum_torque[5] += __shfl_down(sum_torque[5], offset);
sum_torque[6] += __shfl_down(sum_torque[6], offset);
sum_torque[7] += __shfl_down(sum_torque[7], offset);
sum_torque[8] += __shfl_down(sum_torque[8], offset);
}
}
#else
// fully unroll reduction within a single warp
if ((blockSize >= 64) && (tid < 32))
{
sdata[tid + 0 * blockSize] = sum_fx = sum_fx + sdata[tid + 0 * blockSize + 32];
sdata[tid + 1 * blockSize] = sum_fy = sum_fy + sdata[tid + 1 * blockSize + 32];
sdata[tid + 2 * blockSize] = sum_fz = sum_fz + sdata[tid + 2 * blockSize + 32];
sdata[tid + 3 * blockSize] = sum_E = sum_E + sdata[tid + 3 * blockSize + 32];
sdata[tid + 4 * blockSize] = sum_torque[0] = sum_torque[0] + sdata[tid + 4 * blockSize + 32];
sdata[tid + 5 * blockSize] = sum_torque[1] = sum_torque[1] + sdata[tid + 5 * blockSize + 32];
sdata[tid + 6 * blockSize] = sum_torque[2] = sum_torque[2] + sdata[tid + 6 * blockSize + 32];
sdata[tid + 7 * blockSize] = sum_torque[3] = sum_torque[3] + sdata[tid + 7 * blockSize + 32];
sdata[tid + 8 * blockSize] = sum_torque[4] = sum_torque[4] + sdata[tid + 8 * blockSize + 32];
sdata[tid + 9 * blockSize] = sum_torque[5] = sum_torque[5] + sdata[tid + 9 * blockSize + 32];
sdata[tid + 10* blockSize] = sum_torque[6] = sum_torque[6] + sdata[tid + 10* blockSize + 32];
sdata[tid + 11* blockSize] = sum_torque[7] = sum_torque[7] + sdata[tid + 11* blockSize + 32];
sdata[tid + 12* blockSize] = sum_torque[8] = sum_torque[8] + sdata[tid + 12* blockSize + 32];
}
__syncthreads();
if ((blockSize >= 32) && (tid < 16))
{
sdata[tid + 0 * blockSize] = sum_fx = sum_fx + sdata[tid + 0 * blockSize + 16];
sdata[tid + 1 * blockSize] = sum_fy = sum_fy + sdata[tid + 1 * blockSize + 16];
sdata[tid + 2 * blockSize] = sum_fz = sum_fz + sdata[tid + 2 * blockSize + 16];
sdata[tid + 3 * blockSize] = sum_E = sum_E + sdata[tid + 3 * blockSize + 16];
sdata[tid + 4 * blockSize] = sum_torque[0] = sum_torque[0] + sdata[tid + 4 * blockSize + 16];
sdata[tid + 5 * blockSize] = sum_torque[1] = sum_torque[1] + sdata[tid + 5 * blockSize + 16];
sdata[tid + 6 * blockSize] = sum_torque[2] = sum_torque[2] + sdata[tid + 6 * blockSize + 16];
sdata[tid + 7 * blockSize] = sum_torque[3] = sum_torque[3] + sdata[tid + 7 * blockSize + 16];
sdata[tid + 8 * blockSize] = sum_torque[4] = sum_torque[4] + sdata[tid + 8 * blockSize + 16];
sdata[tid + 9 * blockSize] = sum_torque[5] = sum_torque[5] + sdata[tid + 9 * blockSize + 16];
sdata[tid + 10* blockSize] = sum_torque[6] = sum_torque[6] + sdata[tid + 10* blockSize + 16];
sdata[tid + 11* blockSize] = sum_torque[7] = sum_torque[7] + sdata[tid + 11* blockSize + 16];
sdata[tid + 12* blockSize] = sum_torque[8] = sum_torque[8] + sdata[tid + 12* blockSize + 16];
}
__syncthreads();
if ((blockSize >= 16) && (tid < 8))
{
sdata[tid + 0 * blockSize] = sum_fx = sum_fx + sdata[tid + 0 * blockSize + 8];
sdata[tid + 1 * blockSize] = sum_fy = sum_fy + sdata[tid + 1 * blockSize + 8];
sdata[tid + 2 * blockSize] = sum_fz = sum_fz + sdata[tid + 2 * blockSize + 8];
sdata[tid + 3 * blockSize] = sum_E = sum_E + sdata[tid + 3 * blockSize + 8];
sdata[tid + 4 * blockSize] = sum_torque[0] = sum_torque[0] + sdata[tid + 4 * blockSize + 8];
sdata[tid + 5 * blockSize] = sum_torque[1] = sum_torque[1] + sdata[tid + 5 * blockSize + 8];
sdata[tid + 6 * blockSize] = sum_torque[2] = sum_torque[2] + sdata[tid + 6 * blockSize + 8];
sdata[tid + 7 * blockSize] = sum_torque[3] = sum_torque[3] + sdata[tid + 7 * blockSize + 8];
sdata[tid + 8 * blockSize] = sum_torque[4] = sum_torque[4] + sdata[tid + 8 * blockSize + 8];
sdata[tid + 9 * blockSize] = sum_torque[5] = sum_torque[5] + sdata[tid + 9 * blockSize + 8];
sdata[tid + 10* blockSize] = sum_torque[6] = sum_torque[6] + sdata[tid + 10* blockSize + 8];
sdata[tid + 11* blockSize] = sum_torque[7] = sum_torque[7] + sdata[tid + 11* blockSize + 8];
sdata[tid + 12* blockSize] = sum_torque[8] = sum_torque[8] + sdata[tid + 12* blockSize + 8];
}
__syncthreads();
if ((blockSize >= 8) && (tid < 4))
{
sdata[tid + 0 * blockSize] = sum_fx = sum_fx + sdata[tid + 0 * blockSize + 4];
sdata[tid + 1 * blockSize] = sum_fy = sum_fy + sdata[tid + 1 * blockSize + 4];
sdata[tid + 2 * blockSize] = sum_fz = sum_fz + sdata[tid + 2 * blockSize + 4];
sdata[tid + 3 * blockSize] = sum_E = sum_E + sdata[tid + 3 * blockSize + 4];
sdata[tid + 4 * blockSize] = sum_torque[0] = sum_torque[0] + sdata[tid + 4 * blockSize + 4];
sdata[tid + 5 * blockSize] = sum_torque[1] = sum_torque[1] + sdata[tid + 5 * blockSize + 4];
sdata[tid + 6 * blockSize] = sum_torque[2] = sum_torque[2] + sdata[tid + 6 * blockSize + 4];
sdata[tid + 7 * blockSize] = sum_torque[3] = sum_torque[3] + sdata[tid + 7 * blockSize + 4];
sdata[tid + 8 * blockSize] = sum_torque[4] = sum_torque[4] + sdata[tid + 8 * blockSize + 4];
sdata[tid + 9 * blockSize] = sum_torque[5] = sum_torque[5] + sdata[tid + 9 * blockSize + 4];
sdata[tid + 10* blockSize] = sum_torque[6] = sum_torque[6] + sdata[tid + 10* blockSize + 4];
sdata[tid + 11* blockSize] = sum_torque[7] = sum_torque[7] + sdata[tid + 11* blockSize + 4];
sdata[tid + 12* blockSize] = sum_torque[8] = sum_torque[8] + sdata[tid + 12* blockSize + 4];
}
__syncthreads();
if ((blockSize >= 4) && (tid < 2))
{
sdata[tid + 0 * blockSize] = sum_fx = sum_fx + sdata[tid + 0 * blockSize + 2];
sdata[tid + 1 * blockSize] = sum_fy = sum_fy + sdata[tid + 1 * blockSize + 2];
sdata[tid + 2 * blockSize] = sum_fz = sum_fz + sdata[tid + 2 * blockSize + 2];
sdata[tid + 3 * blockSize] = sum_E = sum_E + sdata[tid + 3 * blockSize + 2];
sdata[tid + 4 * blockSize] = sum_torque[0] = sum_torque[0] + sdata[tid + 4 * blockSize + 2];
sdata[tid + 5 * blockSize] = sum_torque[1] = sum_torque[1] + sdata[tid + 5 * blockSize + 2];
sdata[tid + 6 * blockSize] = sum_torque[2] = sum_torque[2] + sdata[tid + 6 * blockSize + 2];
sdata[tid + 7 * blockSize] = sum_torque[3] = sum_torque[3] + sdata[tid + 7 * blockSize + 2];
sdata[tid + 8 * blockSize] = sum_torque[4] = sum_torque[4] + sdata[tid + 8 * blockSize + 2];
sdata[tid + 9 * blockSize] = sum_torque[5] = sum_torque[5] + sdata[tid + 9 * blockSize + 2];
sdata[tid + 10* blockSize] = sum_torque[6] = sum_torque[6] + sdata[tid + 10* blockSize + 2];
sdata[tid + 11* blockSize] = sum_torque[7] = sum_torque[7] + sdata[tid + 11* blockSize + 2];
sdata[tid + 12* blockSize] = sum_torque[8] = sum_torque[8] + sdata[tid + 12* blockSize + 2];
}
__syncthreads();
if ((blockSize >= 2) && ( tid < 1))
{
sdata[tid + 0 * blockSize] = sum_fx = sum_fx + sdata[tid + 0 * blockSize + 1];
sdata[tid + 1 * blockSize] = sum_fy = sum_fy + sdata[tid + 1 * blockSize + 1];
sdata[tid + 2 * blockSize] = sum_fz = sum_fz + sdata[tid + 2 * blockSize + 1];
sdata[tid + 3 * blockSize] = sum_E = sum_E + sdata[tid + 3 * blockSize + 1];
sdata[tid + 4 * blockSize] = sum_torque[0] = sum_torque[0] + sdata[tid + 4 * blockSize + 1];
sdata[tid + 5 * blockSize] = sum_torque[1] = sum_torque[1] + sdata[tid + 5 * blockSize + 1];
sdata[tid + 6 * blockSize] = sum_torque[2] = sum_torque[2] + sdata[tid + 6 * blockSize + 1];
sdata[tid + 7 * blockSize] = sum_torque[3] = sum_torque[3] + sdata[tid + 7 * blockSize + 1];
sdata[tid + 8 * blockSize] = sum_torque[4] = sum_torque[4] + sdata[tid + 8 * blockSize + 1];
sdata[tid + 9 * blockSize] = sum_torque[5] = sum_torque[5] + sdata[tid + 9 * blockSize + 1];
sdata[tid + 10* blockSize] = sum_torque[6] = sum_torque[6] + sdata[tid + 10* blockSize + 1];
sdata[tid + 11* blockSize] = sum_torque[7] = sum_torque[7] + sdata[tid + 11* blockSize + 1];
sdata[tid + 12* blockSize] = sum_torque[8] = sum_torque[8] + sdata[tid + 12* blockSize + 1];
}
__syncthreads();
#endif
// write result for this block to global mem
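// Layout per block: g_odata[blockIdx.x * 13 + 0..12] = {fx, fy, fz, E, torque[0..8]}.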
if (tid == 0) {
g_odata[0 + blockIdx.x*13] = sum_fx;
g_odata[1 + blockIdx.x*13] = sum_fy;
g_odata[2 + blockIdx.x*13] = sum_fz;
g_odata[3 + blockIdx.x*13] = sum_E;
g_odata[4 + blockIdx.x*13] = sum_torque[0];
g_odata[5 + blockIdx.x*13] = sum_torque[1];
g_odata[6 + blockIdx.x*13] = sum_torque[2];
g_odata[7 + blockIdx.x*13] = sum_torque[3];
g_odata[8 + blockIdx.x*13] = sum_torque[4];
g_odata[9 + blockIdx.x*13] = sum_torque[5];
g_odata[10 + blockIdx.x*13] = sum_torque[6];
g_odata[11 + blockIdx.x*13] = sum_torque[7];
g_odata[12 + blockIdx.x*13] = sum_torque[8];
}
}
template <class T>
void d_reduce(
const unsigned& threads,
const unsigned& blocks,
const unsigned& numAtoms,
T* xPos, T* yPos, T* zPos,
T *d_fx, T *d_fy, T *d_fz, T *d_E,
T *g_odata,
const cudaStream_t& stream)
{
dim3 dimBlock(threads, 1, 1);
dim3 dimGrid(blocks, 1, 1);
// when there is only one warp per block, we need to allocate two warps
// worth of shared memory so that we don't index shared memory out of bounds
const int smemSize = (threads <= 32) ? 2 * 13 * threads * sizeof(T) : 13 * threads * sizeof(T);
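// e.g. threads == 32 with T == double gives 2 * 13 * 32 * 8 = 6656 bytes; threads == 256 gives 13 * 256 * 8 = 26624 bytes.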
// choose which of the optimized versions of reduction to launch
if (isPow2(numAtoms))
{
switch (threads)
{
case 1024:
blockReduce<T, 1024,true><<< dimGrid, dimBlock, smemSize, stream >>>(numAtoms, xPos, yPos, zPos, d_fx, d_fy, d_fz, d_E, g_odata);
break;
case 512:
blockReduce<T, 512, true><<< dimGrid, dimBlock, smemSize, stream >>>(numAtoms, xPos, yPos, zPos, d_fx, d_fy, d_fz, d_E, g_odata);
break;
case 256:
blockReduce<T, 256, true><<< dimGrid, dimBlock, smemSize, stream >>>(numAtoms, xPos, yPos, zPos, d_fx, d_fy, d_fz, d_E, g_odata);
break;
case 128:
blockReduce<T, 128, true><<< dimGrid, dimBlock, smemSize, stream >>>(numAtoms, xPos, yPos, zPos, d_fx, d_fy, d_fz, d_E, g_odata);
break;
case 64:
blockReduce<T, 64, true><<< dimGrid, dimBlock, smemSize, stream >>>(numAtoms, xPos, yPos, zPos, d_fx, d_fy, d_fz, d_E, g_odata);
break;
case 32:
blockReduce<T, 32, true><<< dimGrid, dimBlock, smemSize, stream >>>(numAtoms, xPos, yPos, zPos, d_fx, d_fy, d_fz, d_E, g_odata);
break;
case 16:
blockReduce<T, 16, true><<< dimGrid, dimBlock, smemSize, stream >>>(numAtoms, xPos, yPos, zPos, d_fx, d_fy, d_fz, d_E, g_odata);
break;
case 8:
blockReduce<T, 8, true><<< dimGrid, dimBlock, smemSize, stream >>>(numAtoms, xPos, yPos, zPos, d_fx, d_fy, d_fz, d_E, g_odata);
break;
case 4:
blockReduce<T, 4, true><<< dimGrid, dimBlock, smemSize, stream >>>(numAtoms, xPos, yPos, zPos, d_fx, d_fy, d_fz, d_E, g_odata);
break;
case 2:
blockReduce<T, 2, true><<< dimGrid, dimBlock, smemSize, stream >>>(numAtoms, xPos, yPos, zPos, d_fx, d_fy, d_fz, d_E, g_odata);
break;
case 1:
blockReduce<T, 1, true><<< dimGrid, dimBlock, smemSize, stream >>>(numAtoms, xPos, yPos, zPos, d_fx, d_fy, d_fz, d_E, g_odata);
break;
}
}
else
{
switch (threads)
{
case 1024:
blockReduce<T, 1024,false><<< dimGrid, dimBlock, smemSize, stream >>>(numAtoms, xPos, yPos, zPos, d_fx, d_fy, d_fz, d_E, g_odata);
break;
case 512:
blockReduce<T, 512, false><<< dimGrid, dimBlock, smemSize, stream >>>(numAtoms, xPos, yPos, zPos, d_fx, d_fy, d_fz, d_E, g_odata);
break;
case 256:
blockReduce<T, 256, false><<< dimGrid, dimBlock, smemSize, stream >>>(numAtoms, xPos, yPos, zPos, d_fx, d_fy, d_fz, d_E, g_odata);
break;
case 128:
blockReduce<T, 128, false><<< dimGrid, dimBlock, smemSize, stream >>>(numAtoms, xPos, yPos, zPos, d_fx, d_fy, d_fz, d_E, g_odata);
break;
case 64:
blockReduce<T, 64, false><<< dimGrid, dimBlock, smemSize, stream >>>(numAtoms, xPos, yPos, zPos, d_fx, d_fy, d_fz, d_E, g_odata);
break;
case 32:
blockReduce<T, 32, false><<< dimGrid, dimBlock, smemSize, stream >>>(numAtoms, xPos, yPos, zPos, d_fx, d_fy, d_fz, d_E, g_odata);
break;
case 16:
blockReduce<T, 16, false><<< dimGrid, dimBlock, smemSize, stream >>>(numAtoms, xPos, yPos, zPos, d_fx, d_fy, d_fz, d_E, g_odata);
break;
case 8:
blockReduce<T, 8, false><<< dimGrid, dimBlock, smemSize, stream >>>(numAtoms, xPos, yPos, zPos, d_fx, d_fy, d_fz, d_E, g_odata);
break;
case 4:
blockReduce<T, 4, false><<< dimGrid, dimBlock, smemSize, stream >>>(numAtoms, xPos, yPos, zPos, d_fx, d_fy, d_fz, d_E, g_odata);
break;
case 2:
blockReduce<T, 2, false><<< dimGrid, dimBlock, smemSize, stream >>>(numAtoms, xPos, yPos, zPos, d_fx, d_fy, d_fz, d_E, g_odata);
break;
case 1:
blockReduce<T, 1, false><<< dimGrid, dimBlock, smemSize, stream >>>(numAtoms, xPos, yPos, zPos, d_fx, d_fy, d_fz, d_E, g_odata);
break;
}
}
}
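// Illustrative call (names hypothetical): with 256 threads per block,
//   d_reduce<float>(256, blocks, numAtoms, d_x, d_y, d_z, d_fx, d_fy, d_fz, d_E, d_partial, stream);
// leaves 13 partial sums (fx, fy, fz, E, torque[0..8]) per block in d_partial;
// a final reduction over the `blocks` entries is still needed to obtain totals.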
template
void d_reduce<float>(
const unsigned& threads,
const unsigned& blocks,
const unsigned& numAtoms,
float* xPos, float* yPos, float* zPos,
float *d_fx, float *d_fy, float *d_fz, float *d_E,
float *g_odata,
const cudaStream_t& stream);
template
void d_reduce<double>(
const unsigned& threads,
const unsigned& blocks,
const unsigned& numAtoms,
double* xPos, double* yPos, double* zPos,
double *d_fx, double *d_fy, double *d_fz, double *d_E,
double *g_odata,
const cudaStream_t& stream);
} // namespace as
|
d94b4f10026d81a2b32cdca952fc1d7b6e805f62.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <utilities/base_fixture.hpp>
#include <utilities/test_utilities.hpp>
#include <cugraph/algorithms.hpp>
#include <cugraph/graph.hpp>
#include <cugraph/graph_generators.hpp>
#include <cugraph/graph_view.hpp>
#include <cugraph/utilities/high_res_timer.hpp>
#include <raft/core/handle.hpp>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_scalar.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <rmm/mr/device/cuda_memory_resource.hpp>
#include <hip/hip_runtime_api.h>
#include <thrust/copy.h>
#include <thrust/extrema.h>
#include <thrust/functional.h>
#include <thrust/reduce.h>
#include <thrust/replace.h>
#include <thrust/sequence.h>
#include <thrust/transform.h>
#include <gtest/gtest.h>
#include <algorithm>
#include <iostream>
#include <limits>
#include <tuple>
#include <utilities/thrust_wrapper.hpp>
#include <vector>
struct MsBfs_Usecase {
size_t n_edgelists;
size_t min_scale;
size_t max_scale;
size_t edge_factor;
int radius;
};
template <typename vertex_t>
void translate_vertex_ids(raft::handle_t const& handle,
rmm::device_uvector<vertex_t>& d_src_v,
rmm::device_uvector<vertex_t>& d_dst_v,
vertex_t vertex_id_offset)
{
thrust::transform(rmm::exec_policy(handle.get_stream()),
d_src_v.begin(),
d_src_v.end(),
d_src_v.begin(),
[offset = vertex_id_offset] __device__(vertex_t v) { return offset + v; });
thrust::transform(rmm::exec_policy(handle.get_stream()),
d_dst_v.begin(),
d_dst_v.end(),
d_dst_v.begin(),
[offset = vertex_id_offset] __device__(vertex_t v) { return offset + v; });
}
class Tests_MsBfs : public ::testing::TestWithParam<MsBfs_Usecase> {
public:
Tests_MsBfs() {}
static void SetUpTestCase() {}
static void TearDownTestCase() {}
virtual void SetUp() {}
virtual void TearDown() {}
template <typename vertex_t, typename edge_t>
void run_current_test(MsBfs_Usecase const& configuration)
{
using weight_t = float;
auto stream_pool = std::make_shared<rmm::cuda_stream_pool>(16);
raft::handle_t handle(rmm::cuda_stream_per_thread, stream_pool);
auto edgelists =
cugraph::generate_rmat_edgelists<vertex_t>(handle,
configuration.n_edgelists,
configuration.min_scale,
configuration.max_scale,
configuration.edge_factor,
cugraph::generator_distribution_t::POWER_LAW,
cugraph::generator_distribution_t::UNIFORM,
uint64_t{0});
// form aggregated edge list
vertex_t n_edges = 0, offset = 0, n_vertices = 0;
std::vector<vertex_t> h_sources;
for (auto i = edgelists.begin(); i != edgelists.end(); ++i) {
// translate
translate_vertex_ids(handle, std::get<0>(*i), std::get<1>(*i), offset);
n_edges += std::get<0>(*i).size();
// populating sources with the smallest v_id in the component
h_sources.push_back(offset);
// v offset is max of src/dst
auto max_src = thrust::reduce(rmm::exec_policy(handle.get_stream()),
std::get<0>(*i).begin(),
std::get<0>(*i).end(),
static_cast<vertex_t>(0),
thrust::maximum<vertex_t>());
auto max_dst = thrust::reduce(rmm::exec_policy(handle.get_stream()),
std::get<1>(*i).begin(),
std::get<1>(*i).end(),
static_cast<vertex_t>(0),
thrust::maximum<vertex_t>());
offset = ::max(max_src, max_dst) + 1;
}
n_vertices = offset;
std::cout << n_vertices << std::endl;
std::cout << n_edges << std::endl;
rmm::device_uvector<vertex_t> d_srcs(n_edges, handle.get_stream());
rmm::device_uvector<vertex_t> d_dst(n_edges, handle.get_stream());
auto it_src = d_srcs.begin();
auto it_dst = d_dst.begin();
for (auto i = edgelists.begin(); i != edgelists.end(); ++i) {
it_src = thrust::copy(rmm::exec_policy(handle.get_stream()),
std::get<0>(*i).begin(),
std::get<0>(*i).end(),
it_src);
it_dst = thrust::copy(rmm::exec_policy(handle.get_stream()),
std::get<1>(*i).begin(),
std::get<1>(*i).end(),
it_dst);
}
rmm::device_uvector<vertex_t> d_sources(h_sources.size(), handle.get_stream());
raft::copy(d_sources.data(), h_sources.data(), h_sources.size(), handle.get_stream());
// create the graph
cugraph::graph_t<vertex_t, edge_t, false, false> graph(handle);
rmm::device_uvector<vertex_t> d_renumber_map_labels(0, handle.get_stream());
rmm::device_uvector<vertex_t> d_vertices(n_vertices, handle.get_stream());
rmm::device_uvector<weight_t> d_weights(n_edges, handle.get_stream());
thrust::sequence(
rmm::exec_policy(handle.get_stream()), d_vertices.begin(), d_vertices.end(), vertex_t{0});
std::tie(graph, std::ignore, std::ignore, std::ignore, std::ignore) = cugraph::
create_graph_from_edgelist<vertex_t, edge_t, weight_t, edge_t, int32_t, false, false>(
handle,
std::move(d_vertices),
std::move(d_srcs),
std::move(d_dst),
std::nullopt,
std::nullopt,
std::nullopt,
cugraph::graph_properties_t{false, true},
false);
auto graph_view = graph.view();
std::vector<rmm::device_uvector<vertex_t>> d_distances_ref{};
std::vector<rmm::device_uvector<vertex_t>> d_predecessors_ref{};
std::vector<std::vector<vertex_t>> h_distances_ref(h_sources.size());
std::vector<std::vector<vertex_t>> h_predecessors_ref(h_sources.size());
d_distances_ref.reserve(h_sources.size());
d_predecessors_ref.reserve(h_sources.size());
for (size_t i = 0; i < h_sources.size(); i++) {
rmm::device_uvector<vertex_t> tmp_distances(graph_view.number_of_vertices(),
handle.get_next_usable_stream(i));
rmm::device_uvector<vertex_t> tmp_predecessors(graph_view.number_of_vertices(),
handle.get_next_usable_stream(i));
d_distances_ref.push_back(std::move(tmp_distances));
d_predecessors_ref.push_back(std::move(tmp_predecessors));
}
// warm up
bool direction_optimizing = false;
vertex_t source = h_sources[0];
rmm::device_scalar<vertex_t> const d_source_0(source, handle.get_stream());
cugraph::bfs(handle,
graph_view,
d_distances_ref[0].begin(),
d_predecessors_ref[0].begin(),
d_source_0.data(),
size_t{1},
direction_optimizing,
configuration.radius);
// one by one
HighResTimer hr_timer;
hr_timer.start("bfs");
hipProfilerStart();
for (size_t i = 0; i < h_sources.size(); i++) {
source = h_sources[i];
rmm::device_scalar<vertex_t> const d_source_i(source, handle.get_stream());
cugraph::bfs(handle,
graph_view,
d_distances_ref[i].begin(),
d_predecessors_ref[i].begin(),
d_source_i.data(),
size_t{1},
direction_optimizing,
configuration.radius);
}
hipProfilerStop();
hr_timer.stop();
// ms
rmm::device_uvector<vertex_t> d_distances(graph_view.number_of_vertices(), handle.get_stream());
rmm::device_uvector<vertex_t> d_predecessors(graph_view.number_of_vertices(),
handle.get_stream());
hr_timer.start("msbfs");
hipProfilerStart();
cugraph::bfs(handle,
graph_view,
d_distances.begin(),
d_predecessors.begin(),
d_sources.data(),
h_sources.size(),
direction_optimizing,
configuration.radius);
hipProfilerStop();
hr_timer.stop();
hr_timer.display_and_clear(std::cout);
// checksum
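// Unreachable vertices keep distance == numeric_limits<vertex_t>::max(); they are
// zeroed below so the summed per-source distances can be compared with the
// multi-source run.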
vertex_t ref_sum = 0;
for (size_t i = 0; i < h_sources.size(); i++) {
thrust::replace(rmm::exec_policy(handle.get_stream()),
d_distances_ref[i].begin(),
d_distances_ref[i].end(),
std::numeric_limits<vertex_t>::max(),
static_cast<vertex_t>(0));
ref_sum += thrust::reduce(rmm::exec_policy(handle.get_stream()),
d_distances_ref[i].begin(),
d_distances_ref[i].end(),
static_cast<vertex_t>(0));
}
thrust::replace(rmm::exec_policy(handle.get_stream()),
d_distances.begin(),
d_distances.end(),
std::numeric_limits<vertex_t>::max(),
static_cast<vertex_t>(0));
vertex_t ms_sum = thrust::reduce(rmm::exec_policy(handle.get_stream()),
d_distances.begin(),
d_distances.end(),
static_cast<vertex_t>(0));
ASSERT_TRUE(ref_sum > 0);
ASSERT_TRUE(ref_sum < std::numeric_limits<vertex_t>::max());
ASSERT_TRUE(ref_sum == ms_sum);
}
};
TEST_P(Tests_MsBfs, CheckInt32Int32) { run_current_test<int32_t, int32_t>(GetParam()); }
INSTANTIATE_TEST_SUITE_P(rmat_small_test,
Tests_MsBfs,
::testing::Values(MsBfs_Usecase{8, 10, 16, 32, 2},
MsBfs_Usecase{512, 10, 16, 32, 3},
MsBfs_Usecase{512, 10, 16, 32, 100}));
CUGRAPH_TEST_PROGRAM_MAIN()
| d94b4f10026d81a2b32cdca952fc1d7b6e805f62.cu | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <utilities/base_fixture.hpp>
#include <utilities/test_utilities.hpp>
#include <cugraph/algorithms.hpp>
#include <cugraph/graph.hpp>
#include <cugraph/graph_generators.hpp>
#include <cugraph/graph_view.hpp>
#include <cugraph/utilities/high_res_timer.hpp>
#include <raft/core/handle.hpp>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_scalar.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <rmm/mr/device/cuda_memory_resource.hpp>
#include <cuda_profiler_api.h>
#include <thrust/copy.h>
#include <thrust/extrema.h>
#include <thrust/functional.h>
#include <thrust/reduce.h>
#include <thrust/replace.h>
#include <thrust/sequence.h>
#include <thrust/transform.h>
#include <gtest/gtest.h>
#include <algorithm>
#include <iostream>
#include <limits>
#include <tuple>
#include <utilities/thrust_wrapper.hpp>
#include <vector>
struct MsBfs_Usecase {
size_t n_edgelists;
size_t min_scale;
size_t max_scale;
size_t edge_factor;
int radius;
};
template <typename vertex_t>
void translate_vertex_ids(raft::handle_t const& handle,
rmm::device_uvector<vertex_t>& d_src_v,
rmm::device_uvector<vertex_t>& d_dst_v,
vertex_t vertex_id_offset)
{
thrust::transform(rmm::exec_policy(handle.get_stream()),
d_src_v.begin(),
d_src_v.end(),
d_src_v.begin(),
[offset = vertex_id_offset] __device__(vertex_t v) { return offset + v; });
thrust::transform(rmm::exec_policy(handle.get_stream()),
d_dst_v.begin(),
d_dst_v.end(),
d_dst_v.begin(),
[offset = vertex_id_offset] __device__(vertex_t v) { return offset + v; });
}
class Tests_MsBfs : public ::testing::TestWithParam<MsBfs_Usecase> {
public:
Tests_MsBfs() {}
static void SetUpTestCase() {}
static void TearDownTestCase() {}
virtual void SetUp() {}
virtual void TearDown() {}
template <typename vertex_t, typename edge_t>
void run_current_test(MsBfs_Usecase const& configuration)
{
using weight_t = float;
auto stream_pool = std::make_shared<rmm::cuda_stream_pool>(16);
raft::handle_t handle(rmm::cuda_stream_per_thread, stream_pool);
auto edgelists =
cugraph::generate_rmat_edgelists<vertex_t>(handle,
configuration.n_edgelists,
configuration.min_scale,
configuration.max_scale,
configuration.edge_factor,
cugraph::generator_distribution_t::POWER_LAW,
cugraph::generator_distribution_t::UNIFORM,
uint64_t{0});
// form aggregated edge list
vertex_t n_edges = 0, offset = 0, n_vertices = 0;
std::vector<vertex_t> h_sources;
for (auto i = edgelists.begin(); i != edgelists.end(); ++i) {
// translate
translate_vertex_ids(handle, std::get<0>(*i), std::get<1>(*i), offset);
n_edges += std::get<0>(*i).size();
// populating sources with the smallest v_id in the component
h_sources.push_back(offset);
// v offset is max of src/dst
auto max_src = thrust::reduce(rmm::exec_policy(handle.get_stream()),
std::get<0>(*i).begin(),
std::get<0>(*i).end(),
static_cast<vertex_t>(0),
thrust::maximum<vertex_t>());
auto max_dst = thrust::reduce(rmm::exec_policy(handle.get_stream()),
std::get<1>(*i).begin(),
std::get<1>(*i).end(),
static_cast<vertex_t>(0),
thrust::maximum<vertex_t>());
offset = std::max(max_src, max_dst) + 1;
}
n_vertices = offset;
std::cout << n_vertices << std::endl;
std::cout << n_edges << std::endl;
rmm::device_uvector<vertex_t> d_srcs(n_edges, handle.get_stream());
rmm::device_uvector<vertex_t> d_dst(n_edges, handle.get_stream());
auto it_src = d_srcs.begin();
auto it_dst = d_dst.begin();
for (auto i = edgelists.begin(); i != edgelists.end(); ++i) {
it_src = thrust::copy(rmm::exec_policy(handle.get_stream()),
std::get<0>(*i).begin(),
std::get<0>(*i).end(),
it_src);
it_dst = thrust::copy(rmm::exec_policy(handle.get_stream()),
std::get<1>(*i).begin(),
std::get<1>(*i).end(),
it_dst);
}
rmm::device_uvector<vertex_t> d_sources(h_sources.size(), handle.get_stream());
raft::copy(d_sources.data(), h_sources.data(), h_sources.size(), handle.get_stream());
// create the graph
cugraph::graph_t<vertex_t, edge_t, false, false> graph(handle);
rmm::device_uvector<vertex_t> d_renumber_map_labels(0, handle.get_stream());
rmm::device_uvector<vertex_t> d_vertices(n_vertices, handle.get_stream());
rmm::device_uvector<weight_t> d_weights(n_edges, handle.get_stream());
thrust::sequence(
rmm::exec_policy(handle.get_stream()), d_vertices.begin(), d_vertices.end(), vertex_t{0});
std::tie(graph, std::ignore, std::ignore, std::ignore, std::ignore) = cugraph::
create_graph_from_edgelist<vertex_t, edge_t, weight_t, edge_t, int32_t, false, false>(
handle,
std::move(d_vertices),
std::move(d_srcs),
std::move(d_dst),
std::nullopt,
std::nullopt,
std::nullopt,
cugraph::graph_properties_t{false, true},
false);
auto graph_view = graph.view();
std::vector<rmm::device_uvector<vertex_t>> d_distances_ref{};
std::vector<rmm::device_uvector<vertex_t>> d_predecessors_ref{};
std::vector<std::vector<vertex_t>> h_distances_ref(h_sources.size());
std::vector<std::vector<vertex_t>> h_predecessors_ref(h_sources.size());
d_distances_ref.reserve(h_sources.size());
d_predecessors_ref.reserve(h_sources.size());
for (size_t i = 0; i < h_sources.size(); i++) {
rmm::device_uvector<vertex_t> tmp_distances(graph_view.number_of_vertices(),
handle.get_next_usable_stream(i));
rmm::device_uvector<vertex_t> tmp_predecessors(graph_view.number_of_vertices(),
handle.get_next_usable_stream(i));
d_distances_ref.push_back(std::move(tmp_distances));
d_predecessors_ref.push_back(std::move(tmp_predecessors));
}
// warm up
bool direction_optimizing = false;
vertex_t source = h_sources[0];
rmm::device_scalar<vertex_t> const d_source_0(source, handle.get_stream());
cugraph::bfs(handle,
graph_view,
d_distances_ref[0].begin(),
d_predecessors_ref[0].begin(),
d_source_0.data(),
size_t{1},
direction_optimizing,
configuration.radius);
// one by one
HighResTimer hr_timer;
hr_timer.start("bfs");
cudaProfilerStart();
for (size_t i = 0; i < h_sources.size(); i++) {
source = h_sources[i];
rmm::device_scalar<vertex_t> const d_source_i(source, handle.get_stream());
cugraph::bfs(handle,
graph_view,
d_distances_ref[i].begin(),
d_predecessors_ref[i].begin(),
d_source_i.data(),
size_t{1},
direction_optimizing,
configuration.radius);
}
cudaProfilerStop();
hr_timer.stop();
// ms
rmm::device_uvector<vertex_t> d_distances(graph_view.number_of_vertices(), handle.get_stream());
rmm::device_uvector<vertex_t> d_predecessors(graph_view.number_of_vertices(),
handle.get_stream());
hr_timer.start("msbfs");
cudaProfilerStart();
cugraph::bfs(handle,
graph_view,
d_distances.begin(),
d_predecessors.begin(),
d_sources.data(),
h_sources.size(),
direction_optimizing,
configuration.radius);
cudaProfilerStop();
hr_timer.stop();
hr_timer.display_and_clear(std::cout);
// checksum
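// Unreachable vertices keep distance == numeric_limits<vertex_t>::max(); they are
// zeroed below so the summed per-source distances can be compared with the
// multi-source run.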
vertex_t ref_sum = 0;
for (size_t i = 0; i < h_sources.size(); i++) {
thrust::replace(rmm::exec_policy(handle.get_stream()),
d_distances_ref[i].begin(),
d_distances_ref[i].end(),
std::numeric_limits<vertex_t>::max(),
static_cast<vertex_t>(0));
ref_sum += thrust::reduce(rmm::exec_policy(handle.get_stream()),
d_distances_ref[i].begin(),
d_distances_ref[i].end(),
static_cast<vertex_t>(0));
}
thrust::replace(rmm::exec_policy(handle.get_stream()),
d_distances.begin(),
d_distances.end(),
std::numeric_limits<vertex_t>::max(),
static_cast<vertex_t>(0));
vertex_t ms_sum = thrust::reduce(rmm::exec_policy(handle.get_stream()),
d_distances.begin(),
d_distances.end(),
static_cast<vertex_t>(0));
ASSERT_TRUE(ref_sum > 0);
ASSERT_TRUE(ref_sum < std::numeric_limits<vertex_t>::max());
ASSERT_TRUE(ref_sum == ms_sum);
}
};
TEST_P(Tests_MsBfs, CheckInt32Int32) { run_current_test<int32_t, int32_t>(GetParam()); }
INSTANTIATE_TEST_SUITE_P(rmat_small_test,
Tests_MsBfs,
::testing::Values(MsBfs_Usecase{8, 10, 16, 32, 2},
MsBfs_Usecase{512, 10, 16, 32, 3},
MsBfs_Usecase{512, 10, 16, 32, 100}));
CUGRAPH_TEST_PROGRAM_MAIN()
|
0e5435b6b0de18e6bcf7cae5eb9b8a1edd59e579.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
int exec_parallel_gpu(solver_props *props, const char *outputs_dirname, double *progress, int resuming){
unsigned int i;
unsigned int inputid;
unsigned int modelid;
unsigned int num_gpu_threads;
unsigned int num_gpu_blocks;
unsigned int active_models;
solver_props *device_props;
#if NUM_SAMPLED_INPUTS > 0
sampled_input_t tmp_sampled_inputs[STRUCT_SIZE * NUM_SAMPLED_INPUTS];
#endif
num_gpu_threads = GPU_BLOCK_SIZE < props->num_models ? GPU_BLOCK_SIZE : props->num_models;
num_gpu_blocks = (props->num_models + GPU_BLOCK_SIZE - 1) / GPU_BLOCK_SIZE;
// Initialize all iterators to running
active_models = 1;
for(modelid = 0; modelid < props->num_models; modelid++){
for(i=0;i<NUM_ITERATORS;i++){
props[i].running[modelid] = 1;
}
}
// Initialize GPU device memory for all solvers (returns pointer to device memory)
device_props = gpu_init_props(props);
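// Main loop: launch the kernel for all models, copy outputs and times back,
// log them, and repeat until every model's ob->finished flag is set.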
while(active_models){
// Execute models on the GPU
hipLaunchKernelGGL(( exec_kernel_gpu), dim3(num_gpu_blocks), dim3(num_gpu_threads), 0, 0, device_props, resuming);
resuming = 1;
// Copy data back to the host
cutilSafeCall(hipMemcpy(props->ob, props->gpu.ob, props->ob_size, hipMemcpyDeviceToHost));
cutilSafeCall(hipMemcpy(props->time, props->gpu.time, props->num_models * sizeof(CDATAFORMAT), hipMemcpyDeviceToHost));
#if NUM_SAMPLED_INPUTS > 0
cutilSafeCall(hipMemcpyFromSymbol(tmp_sampled_inputs, sampled_inputs, STRUCT_SIZE * NUM_SAMPLED_INPUTS * sizeof(sampled_input_t), 0, hipMemcpyDeviceToHost));
#endif
active_models = 0;
// Copy data to external api interface
for(modelid = 0; modelid < props->num_models; modelid++){
if(0 != log_outputs(props->ob, outputs_dirname, props->modelid_offset, modelid)) return ERRMEM;
progress[modelid] = (props->time[modelid] - props->starttime) / (props->stoptime - props->starttime);
active_models |= !props->ob->finished[modelid];
#if NUM_SAMPLED_INPUTS > 0
if (!props->ob->finished[modelid]) {
for (inputid = NUM_CONSTANT_INPUTS; inputid < NUM_CONSTANT_INPUTS + NUM_SAMPLED_INPUTS; inputid++) {
sampled_input_t *input = &tmp_sampled_inputs[STRUCT_IDX * NUM_INPUTS + SAMPLED_INPUT_ID(inputid)];
if (input->idx[ARRAY_IDX] >= input->buffered_size[ARRAY_IDX]) {
read_sampled_input(input, props->time[ARRAY_IDX], outputs_dirname, inputid, props->modelid_offset, modelid);
cutilSafeCall(hipMemcpyToSymbol(sampled_inputs, input, sizeof(sampled_input_t), SAMPLED_INPUT_ID(inputid) * sizeof(sampled_input_t), hipMemcpyHostToDevice));
}
}
}
#endif
}
}
// Copy any remaining data back from GPU
gpu_finalize_props(props);
return SUCCESS;
}
| 0e5435b6b0de18e6bcf7cae5eb9b8a1edd59e579.cu | int exec_parallel_gpu(solver_props *props, const char *outputs_dirname, double *progress, int resuming){
unsigned int i;
unsigned int inputid;
unsigned int modelid;
unsigned int num_gpu_threads;
unsigned int num_gpu_blocks;
unsigned int active_models;
solver_props *device_props;
#if NUM_SAMPLED_INPUTS > 0
sampled_input_t tmp_sampled_inputs[STRUCT_SIZE * NUM_SAMPLED_INPUTS];
#endif
num_gpu_threads = GPU_BLOCK_SIZE < props->num_models ? GPU_BLOCK_SIZE : props->num_models;
num_gpu_blocks = (props->num_models + GPU_BLOCK_SIZE - 1) / GPU_BLOCK_SIZE;
// Initialize all iterators to running
active_models = 1;
for(modelid = 0; modelid < props->num_models; modelid++){
for(i=0;i<NUM_ITERATORS;i++){
props[i].running[modelid] = 1;
}
}
// Initialize GPU device memory for all solvers (returns pointer to device memory)
device_props = gpu_init_props(props);
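// Main loop: launch the kernel for all models, copy outputs and times back,
// log them, and repeat until every model's ob->finished flag is set.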
while(active_models){
// Execute models on the GPU
exec_kernel_gpu<<<num_gpu_blocks, num_gpu_threads>>>(device_props, resuming);
resuming = 1;
// Copy data back to the host
cutilSafeCall(cudaMemcpy(props->ob, props->gpu.ob, props->ob_size, cudaMemcpyDeviceToHost));
cutilSafeCall(cudaMemcpy(props->time, props->gpu.time, props->num_models * sizeof(CDATAFORMAT), cudaMemcpyDeviceToHost));
#if NUM_SAMPLED_INPUTS > 0
cutilSafeCall(cudaMemcpyFromSymbol(tmp_sampled_inputs, sampled_inputs, STRUCT_SIZE * NUM_SAMPLED_INPUTS * sizeof(sampled_input_t), 0, cudaMemcpyDeviceToHost));
#endif
active_models = 0;
// Copy data to external api interface
for(modelid = 0; modelid < props->num_models; modelid++){
if(0 != log_outputs(props->ob, outputs_dirname, props->modelid_offset, modelid)) return ERRMEM;
progress[modelid] = (props->time[modelid] - props->starttime) / (props->stoptime - props->starttime);
active_models |= !props->ob->finished[modelid];
#if NUM_SAMPLED_INPUTS > 0
if (!props->ob->finished[modelid]) {
for (inputid = NUM_CONSTANT_INPUTS; inputid < NUM_CONSTANT_INPUTS + NUM_SAMPLED_INPUTS; inputid++) {
sampled_input_t *input = &tmp_sampled_inputs[STRUCT_IDX * NUM_INPUTS + SAMPLED_INPUT_ID(inputid)];
if (input->idx[ARRAY_IDX] >= input->buffered_size[ARRAY_IDX]) {
read_sampled_input(input, props->time[ARRAY_IDX], outputs_dirname, inputid, props->modelid_offset, modelid);
cutilSafeCall(cudaMemcpyToSymbol(sampled_inputs, input, sizeof(sampled_input_t), SAMPLED_INPUT_ID(inputid) * sizeof(sampled_input_t), cudaMemcpyHostToDevice));
}
}
}
#endif
}
}
// Copy any remaining data back from GPU
gpu_finalize_props(props);
return SUCCESS;
}
|
5f981ba8a837dd6a5d8422d2d109db4cc3e31f39.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
constexpr size_t N = 1 << 20;
constexpr int NUM_THREADS = 256;
constexpr int NUM_BLOCKS = (N + NUM_THREADS-1) / NUM_THREADS;
__global__
void add(size_t n, float *x, float *y){
const int start_index = blockIdx.x * blockDim.x + threadIdx.x;
const int stride = blockDim.x * gridDim.x;
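// Grid-stride loop: each thread handles elements start_index, start_index + stride, ...,
// so any grid size covers all n elements.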
for(size_t i = start_index; i < n; i += stride){
y[i] = x[i] + y[i];
}
}
int main(void){
float *x, *y;
hipMallocManaged(&x, N * sizeof(float));
hipMallocManaged(&y, N * sizeof(float));
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
hipLaunchKernelGGL(( add), dim3(NUM_BLOCKS), dim3(NUM_THREADS), 0, 0, N, x, y);
hipDeviceSynchronize();
std::cout << "Done.\n";
//std::cin.get();
hipFree(x);
hipFree(y);
}
| 5f981ba8a837dd6a5d8422d2d109db4cc3e31f39.cu | #include <iostream>
constexpr size_t N = 1 << 20;
constexpr int NUM_THREADS = 256;
constexpr int NUM_BLOCKS = (N + NUM_THREADS-1) / NUM_THREADS;
__global__
void add(size_t n, float *x, float *y){
const int start_index = blockIdx.x * blockDim.x + threadIdx.x;
const int stride = blockDim.x * gridDim.x;
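// Grid-stride loop: each thread handles elements start_index, start_index + stride, ...,
// so any grid size covers all n elements.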
for(size_t i = start_index; i < n; i += stride){
y[i] = x[i] + y[i];
}
}
int main(void){
float *x, *y;
cudaMallocManaged(&x, N * sizeof(float));
cudaMallocManaged(&y, N * sizeof(float));
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
add<<<NUM_BLOCKS, NUM_THREADS>>>(N, x, y);
cudaDeviceSynchronize();
std::cout << "Done.\n";
//std::cin.get();
cudaFree(x);
cudaFree(y);
}
|
8eacbac62b80c53e69c5bd87c9303156b8692bd0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cusolverDn.h>
#include "constants.h"
#include "struct.h"
void applyFrameElasticOperator(double *, double *, double *, double *, double *, fcndata &);
void applyPenaltyOperator(double *, double *, double *, fcndata &);
__global__ void ipcgMatSumKernel(double *d_AxVec, double ldmWgt, double *d_KivMat,
double *d_elYMat, double btmWgt, double *d_pnlMat, int ttlNum)
{
int ttlIdx = blockIdx.x * blockDim.x + threadIdx.x;
if ( ttlIdx < ttlNum )
{
d_AxVec[ttlIdx] = ldmWgt * d_KivMat[ttlIdx]
+ d_elYMat[ttlIdx] + btmWgt * d_pnlMat[ttlIdx];
}
return;
}
void ipcgWholeMatrixMultiplication(double *d_AxVec, double *d_xVec, fcndata &fcnObj)
{
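// Forms d_AxVec = ldmWgt * (K^{-1} x) + (frame-elastic operator applied to x)
// + btmWgt * (penalty operator applied to x); K^{-1} x comes from
// hipsolverDnDpotrs using the pre-factorized d_knLMat, and the three terms are
// summed by ipcgMatSumKernel below.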
int lmkNum = fcnObj.prm.lmkNum;
double ldmWgt = fcnObj.prm.ldmWgt;
double btmWgt = fcnObj.prm.btmWgt;
double *d_KivMat = fcnObj.d_KivMat;
double *d_elYMat = fcnObj.d_elYMat;
double *d_pnlMat = fcnObj.d_pnlMat;
hipMemcpy(d_KivMat, d_xVec, sizeof(double) * lmkNum * DIMNUM, hipMemcpyDeviceToDevice);
hipsolverDnDpotrs(fcnObj.solvHdl, HIPBLAS_FILL_MODE_LOWER, lmkNum, DIMNUM, fcnObj.d_knLMat, lmkNum,
d_KivMat, lmkNum, fcnObj.d_status);
applyFrameElasticOperator(d_elYMat, d_xVec, fcnObj.d_lmkNowEdgMat,
fcnObj.d_nmlNowMat, fcnObj.d_tsvNowMat, fcnObj);
applyPenaltyOperator(d_pnlMat, fcnObj.d_lmkNowBtmMat, d_xVec, fcnObj);
int blkNum = (lmkNum * DIMNUM - 1) / BLKDIM + 1;
hipLaunchKernelGGL(( ipcgMatSumKernel) , dim3(blkNum), dim3(BLKDIM), 0, 0, d_AxVec, ldmWgt, d_KivMat,
d_elYMat, btmWgt, d_pnlMat, lmkNum * DIMNUM);
return;
}
| 8eacbac62b80c53e69c5bd87c9303156b8692bd0.cu | #include <cusolverDn.h>
#include "constants.h"
#include "struct.h"
void applyFrameElasticOperator(double *, double *, double *, double *, double *, fcndata &);
void applyPenaltyOperator(double *, double *, double *, fcndata &);
__global__ void ipcgMatSumKernel(double *d_AxVec, double ldmWgt, double *d_KivMat,
double *d_elYMat, double btmWgt, double *d_pnlMat, int ttlNum)
{
int ttlIdx = blockIdx.x * blockDim.x + threadIdx.x;
if ( ttlIdx < ttlNum )
{
d_AxVec[ttlIdx] = ldmWgt * d_KivMat[ttlIdx]
+ d_elYMat[ttlIdx] + btmWgt * d_pnlMat[ttlIdx];
}
return;
}
void ipcgWholeMatrixMultiplication(double *d_AxVec, double *d_xVec, fcndata &fcnObj)
{
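// Forms d_AxVec = ldmWgt * (K^{-1} x) + (frame-elastic operator applied to x)
// + btmWgt * (penalty operator applied to x); K^{-1} x comes from
// cusolverDnDpotrs using the pre-factorized d_knLMat, and the three terms are
// summed by ipcgMatSumKernel below.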
int lmkNum = fcnObj.prm.lmkNum;
double ldmWgt = fcnObj.prm.ldmWgt;
double btmWgt = fcnObj.prm.btmWgt;
double *d_KivMat = fcnObj.d_KivMat;
double *d_elYMat = fcnObj.d_elYMat;
double *d_pnlMat = fcnObj.d_pnlMat;
cudaMemcpy(d_KivMat, d_xVec, sizeof(double) * lmkNum * DIMNUM, cudaMemcpyDeviceToDevice);
cusolverDnDpotrs(fcnObj.solvHdl, CUBLAS_FILL_MODE_LOWER, lmkNum, DIMNUM, fcnObj.d_knLMat, lmkNum,
d_KivMat, lmkNum, fcnObj.d_status);
applyFrameElasticOperator(d_elYMat, d_xVec, fcnObj.d_lmkNowEdgMat,
fcnObj.d_nmlNowMat, fcnObj.d_tsvNowMat, fcnObj);
applyPenaltyOperator(d_pnlMat, fcnObj.d_lmkNowBtmMat, d_xVec, fcnObj);
int blkNum = (lmkNum * DIMNUM - 1) / BLKDIM + 1;
ipcgMatSumKernel <<<blkNum, BLKDIM>>> (d_AxVec, ldmWgt, d_KivMat,
d_elYMat, btmWgt, d_pnlMat, lmkNum * DIMNUM);
return;
}
|
f6a1460901b333adb854edd1c5ad10dae36a919e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
using namespace std;
#define BLOCK_SIZE 16
#define BASE_TYPE double
__global__ void matrixMult(const BASE_TYPE *A, BASE_TYPE *C, int Acols, int Arows)
{
int i0 = Acols *(blockDim.y*blockIdx.y + threadIdx.y);
//int iAT = Arows*(blockDim.x*blockIdx.x + threadIdx.x) + blockDim.y*blockIdx.y + threadIdx.y;
BASE_TYPE sum = 0;
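// Intended to accumulate the squared norm of one row of A (sum over k of A[row][k]^2);
// each thread of that row then writes the same value into its own column of C.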
for (int k = 0; k < Acols; k++)
{
sum += A[i0 + k] * A[i0 + k];
}
int ind = Acols* (blockDim.y*blockIdx.y + threadIdx.y) + blockDim.x*blockIdx.x + threadIdx.x;
C[ind] = sum;
}
int main()
{
int Arows = 100;
int Acols = 200;
size_t Asize = Arows*Acols * sizeof(BASE_TYPE);
BASE_TYPE *h_A = (BASE_TYPE *)malloc(Asize);
BASE_TYPE *h_C = (BASE_TYPE *)malloc(Asize);
for (int i = 0; i < Arows*Acols; i++)
{
h_A[i] = rand() / (BASE_TYPE)RAND_MAX;
}
for (int i = 0; i < Arows*Acols; i++)
{
printf("h_A[%d]=%d ", i, h_A[i]);
}
BASE_TYPE *d_A = NULL;
hipMalloc((void **)&d_A, Asize);
BASE_TYPE *d_C = NULL;
hipMalloc((void **)&d_C, Asize);
hipMemcpy(d_A, h_A, Asize, hipMemcpyHostToDevice);
dim3 threadsPerBlock = dim3(BLOCK_SIZE, BLOCK_SIZE);
dim3 blocksPerGrid = dim3(Acols / BLOCK_SIZE, Arows / BLOCK_SIZE);
hipLaunchKernelGGL(( matrixMult) , dim3(blocksPerGrid), dim3(threadsPerBlock) , 0, 0, d_A, d_C, Acols, Arows);
hipMemcpy(h_C, d_C, Asize, hipMemcpyDeviceToHost);
printf("Test Started\n");
bool t = false;
for (int i = 0; i < Arows; i++)
{
for (int j = 0; j < Arows; j++)
{
if (h_C[i*Arows + j] !=1)
{
t = true;
//fprintf(stderr, "Result verification failed at element [%d,%d]!\n", i, j);
//printf("sum=%f,h_C[i*Arows + j]=%f\n", 1, h_C[i*Arows + j]);
//exit(EXIT_FAILURE);
printf("Matrix A is not orthogonal\n");
}
if (t) break;
}
if (t) break;
}
printf("Test Passed\n");
hipFree(d_A);
hipFree(d_C);
free(h_A);
free(h_C);
getchar();
system("pause");
}
| f6a1460901b333adb854edd1c5ad10dae36a919e.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
using namespace std;
#define BLOCK_SIZE 16
#define BASE_TYPE double
__global__ void matrixMult(const BASE_TYPE *A, BASE_TYPE *C, int Acols, int Arows)
{
int i0 = Acols *(blockDim.y*blockIdx.y + threadIdx.y);
//int iAT = Arows*(blockDim.x*blockIdx.x + threadIdx.x) + blockDim.y*blockIdx.y + threadIdx.y;
BASE_TYPE sum = 0;
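// Intended to accumulate the squared norm of one row of A (sum over k of A[row][k]^2);
// each thread of that row then writes the same value into its own column of C.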
for (int k = 0; k < Acols; k++)
{
sum += A[i0 + k] * A[i0 + k];
}
int ind = Acols* (blockDim.y*blockIdx.y + threadIdx.y) + blockDim.x*blockIdx.x + threadIdx.x;
C[ind] = sum;
}
int main()
{
int Arows = 100;
int Acols = 200;
size_t Asize = Arows*Acols * sizeof(BASE_TYPE);
BASE_TYPE *h_A = (BASE_TYPE *)malloc(Asize);
BASE_TYPE *h_C = (BASE_TYPE *)malloc(Asize);
for (int i = 0; i < Arows*Acols; i++)
{
h_A[i] = rand() / (BASE_TYPE)RAND_MAX;
}
for (int i = 0; i < Arows*Acols; i++)
{
printf("h_A[%d]=%d ", i, h_A[i]);
}
BASE_TYPE *d_A = NULL;
cudaMalloc((void **)&d_A, Asize);
BASE_TYPE *d_C = NULL;
cudaMalloc((void **)&d_C, Asize);
cudaMemcpy(d_A, h_A, Asize, cudaMemcpyHostToDevice);
dim3 threadsPerBlock = dim3(BLOCK_SIZE, BLOCK_SIZE);
dim3 blocksPerGrid = dim3(Acols / BLOCK_SIZE, Arows / BLOCK_SIZE);
matrixMult <<<blocksPerGrid, threadsPerBlock >>> (d_A, d_C, Acols, Arows);
cudaMemcpy(h_C, d_C, Asize, cudaMemcpyDeviceToHost);
printf("Test Started\n");
bool t = false;
for (int i = 0; i < Arows; i++)
{
for (int j = 0; j < Arows; j++)
{
if (h_C[i*Arows + j] !=1)
{
t = true;
//fprintf(stderr, "Result verification failed at element [%d,%d]!\n", i, j);
//printf("sum=%f,h_C[i*Arows + j]=%f\n", 1, h_C[i*Arows + j]);
//exit(EXIT_FAILURE);
printf("Matrix A is not orthogonal\n");
}
if (t) break;
}
if (t) break;
}
printf("Test Passed\n");
cudaFree(d_A);
cudaFree(d_C);
free(h_A);
free(h_C);
getchar();
system("pause");
}
|
bb4d8fe037ddd85f5b26d49a6814bcea0444560a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* @brief
* utils
*
* @copyright
* Copyright (c) 2020 Xiaomi Corporation (authors: Daniel Povey, Haowen Qiu)
*
* @copyright
* See LICENSE for clarification regarding multiple authors
*/
#include <algorithm>
#include "k2/csrc/math.h"
#include "k2/csrc/utils.h"
namespace k2 {
// See FillValues() where this is invoked. It fills a region with
// a constant value.
__global__ void FillValuesKernel(int32_t *data, int32_t num_values,
int32_t value) {
int32_t job_idx = (blockIdx.x * blockDim.x + threadIdx.x),
stride = (gridDim.x * blockDim.x);
for (; job_idx < num_values; job_idx += stride) data[job_idx] = value;
}
// This launches a kernel. It's the same as doing:
// for (int32_t i = 0; i < num_values; i++) data[i] = value;
__device__ void FillValues(int32_t *data, int32_t num_values, int32_t value) {
int32_t block_size = 256;
int32_t grid_size = NumBlocks(num_values, block_size);
hipLaunchKernelGGL(( FillValuesKernel), dim3(grid_size), dim3(block_size), 0, 0, data, num_values, value);
}
// When we invoke this we make a big enough grid that there doesn't have to
// be a loop over rows, i.e. (gridDim.x * blockDim.x) / threads_per_row >=
// num_rows
__global__ void RowSplitsToRowIdsKernel(int32_t num_rows,
int32_t threads_per_row,
const int32_t *row_splits,
int32_t num_elems, int32_t *row_ids) {
int32_t thread = blockIdx.x * blockDim.x + threadIdx.x,
num_threads = gridDim.x * blockDim.x, row = thread / threads_per_row,
thread_this_row = thread % threads_per_row;
if (row >= num_rows) return;
K2_CHECK_GE(num_threads / threads_per_row, num_rows);
int32_t this_row_split = row_splits[row],
next_row_split = row_splits[row + 1],
row_length = next_row_split - this_row_split;
const int32_t max_loop = 8; // `max_loop` is heuristically chosen.
if (row_length / threads_per_row > max_loop) {
// We decide that looping too many times will be too slow, so we launch
// another kernel to fill in the value for this row. (This is CUDA dynamic
// parallelism).
if (thread_this_row == 0) {
FillValues(row_ids + this_row_split, row_length, row);
}
} else {
// TODO(dan): figure out how to unroll this?
for (; thread_this_row < row_length; thread_this_row += threads_per_row)
row_ids[this_row_split + thread_this_row] = row;
}
}
/*
See declaration of RowSplitsToRowIds() in utils.h. These are implementation
notes.
Suppose the range we need to fill with a
particular number (say, x) is from 1010 to 10000 inclusive (binary). The
first kernel writes x to positions 1010, 1100, 10000; the significance of
that sequence is we keep adding the smallest number we can add to get
another zero at the end of the binary representation, until we exceed the
range we're supposed to fill. The second kernel: for a given index into x
that it must fill (say, 1111), it asks "is the index currently here already
the right one?", which it can test using the function is_valid_index()
below; if it's not already correct, it searches in a sequence of positions:
1110, 1100, 1000, 0000, like our sequence above but going downwards, again
getting more zeros at the end of the binary representation, until it finds
the correct value in the array at the searched position; then it copies the
discovered value to the original position requested (here, 1111).
First kernel pseudocode: for each index 'i' into 't', it does:
for (int32_t n=0, j = t[i]; j < t[i+1]; n++) {
x[j] = i;
if (j & (1<<n)) j += (1 << n);
}
Second kernel pseudocode: for each element of x, it searches for the right
index. Suppose we're given num_indexes == length(n) == length(t) - 1. Define
is_valid_index as follows:
// returns true if j is the value that we should be putting at position
'i' in x:
// that is, if t[j] <= i < t[j+1].
bool is_valid_index(i, j) {
return (j >= 0 && j < num_indexes && t[j] <= i && i < t[j+1]);
}
// We suppose we are given i (the position into x that we're responsible
for
// setting):
orig_i = i;
for (int32_t n=0; !is_valid_index(i, x[i]); n++) {
if (i & (1<<n)) i -= (1 << n);
}
x[orig_i] = x[i];
*/
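// Worked example (illustrative): row_splits = [0, 2, 2, 5] (num_rows = 3,
// num_elems = 5) describes rows of sizes 2, 0 and 3, so the expected output is
// row_ids = [0, 0, 2, 2, 2]; the empty row 1 never appears in row_ids.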
void RowSplitsToRowIds(ContextPtr &c, int32_t num_rows,
const int32_t *row_splits, int32_t num_elems,
int32_t *row_ids) {
NVTX_RANGE(__func__);
if (num_rows <= 0 || num_elems <= 0) return;
DeviceType d = c->GetDeviceType();
if (d == kCpu) {
int32_t cur_row_start = row_splits[0];
K2_CHECK_EQ(cur_row_start, 0);
K2_CHECK_EQ(row_splits[num_rows], num_elems);
for (int32_t row = 0; row < num_rows; ++row) {
int32_t next_row_start = row_splits[row + 1];
for (; cur_row_start < next_row_start; ++cur_row_start)
row_ids[cur_row_start] = row;
}
} else {
K2_CHECK_EQ(d, kCuda);
if (1) {
// TODO: compare this for speed with the other branch. This branch is
// much simpler, and will be considerably faster for "normal" cases ->
// probably preferred.
int32_t avg_elems_per_row = (num_elems + num_rows - 1) / num_rows,
threads_per_row = RoundUpToNearestPowerOfTwo(avg_elems_per_row),
tot_threads = num_rows * threads_per_row;
int32_t block_size = 256;
int32_t grid_size = NumBlocks(tot_threads, block_size);
hipLaunchKernelGGL(( K2_CUDA_SAFE_CALL(RowSplitsToRowIdsKernel), dim3(grid_size), dim3(block_size), 0,
c->GetCudaStream(),
num_rows, threads_per_row, row_splits, num_elems, row_ids));
} else {
// TODO: Will probably just delete this branch at some point.
// The following algorithm isn't particularly adapted to GPU hardware in
// terms of coalesced reads and writes and so on, but it has reasonable
// asymptotic time complexity (assuming all kernels run in parallel),
// specifically: O(log(largest(row_splits[i+1]-row_splits[i])))
auto lambda_init_minus_one = [=] __host__ __device__(int32_t i) {
row_ids[i] = -1;
};
Eval(c, num_elems + 1, lambda_init_minus_one);
auto lambda_phase_one = [=] __host__ __device__(int32_t i) {
int32_t this_row_split = row_splits[i],
next_row_split =
(i < num_rows ? row_splits[i + 1] : this_row_split + 1);
if (this_row_split < next_row_split) row_ids[this_row_split] = i;
// we have to fill in row_ids[this_row_split],
// row_ids[this_row_split+1]... row_ids[next_row_split-1] with the same
// value but that could be a long loop. Instead we write at
// this_row_split and all indexes this_row_split < i < next_row_split
// such that i is the result of rounding up this_row_split to
// (something)*2^n, for n = 1, 2, 3, ... this will take time logarithmic
// in (next_row_split - this_row_split). we can then fill in the gaps
// with a logarithmic-time loop, by looking for a value that's not (-1)
// by rounding the current index down to successively higher powers
// of 2.
for (int32_t power = 0, j = this_row_split;
j + (1 << power) < next_row_split; power++) {
if (j & (1 << power)) {
j += (1 << power);
// we know that j is now < next_row_split, because we checked "j +
// (1<<power) < next_row_split" in the loop condition.
// Note, we don't want a loop-within-a-loop because of how SIMT
// works...
row_ids[j] = i;
}
}
};
Eval(c, num_elems + 1, lambda_phase_one);
auto lambda_phase_two = [=] __host__ __device__(int32_t j) {
int32_t row_index = row_ids[j];
if (row_index != -1) return;
int32_t power = 0, j2 = j;
for (; row_index != -1; power++) {
if (j2 & (1 << power)) {
j2 -= (1 << power);
row_index = row_ids[j2];
}
assert(power < 31);
}
row_ids[j] = row_ids[j2];
};
// could do the next line for num_elems+1, but the element at `num_elems`
// will already be set.
Eval(c, num_elems, lambda_phase_two);
}
}
}
/*
When we invoke this we make a big enough grid that there doesn't have to
be a loop over elements, i.e. (gridDim.x * blockDim.x) / threads_per_elem >
num_elems. (must be >=, because we imagine a phantom element at [num_elems]
with the value `num_rows`.)
@param [in] num_elems Number of elements in ragged matrix
@param [in] threads_per_elem Number of threads we allocate per element.
Must be >= 1.
@param [in] row_ids The row_ids vector, of length `num_elems`;
must be nonnegative and non-decreasing and
all elements < num_rows.
@param [in] num_rows Number of rows, must be greater than the
largest (== last) element of `row_ids`.
@param [out] row_splits This kernel will output a non-decreasing
vector of length num_rows + 1, such that
row_splits[0] == 0,
row_splits[num_rows] == num_elems,
and row_splits[row_ids[i]] <= i <
row_splits[row_ids[i]+1]
*/
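// Example (illustrative): row_ids = [0, 0, 2, 2, 2] with num_rows = 3 yields
// row_splits = [0, 2, 2, 5]; the empty row 1 shows up as two equal consecutive
// entries in row_splits.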
__global__ void RowIdsToRowSplitsKernel(int32_t num_elems,
int32_t threads_per_elem,
const int32_t *row_ids,
int32_t num_rows, int32_t *row_splits) {
int32_t thread = (blockIdx.x * blockDim.x + threadIdx.x),
num_threads = gridDim.x * blockDim.x,
elem = thread / threads_per_elem,
thread_this_elem = thread % threads_per_elem;
K2_CHECK_GE(num_threads / threads_per_elem, num_elems);
if (elem > num_elems) return;
int32_t this_row, prev_row;
if (elem == 0) {
prev_row = -1;
this_row = row_ids[elem];
} else if (elem == num_elems) {
prev_row = row_ids[elem - 1];
this_row = num_rows;
} else {
prev_row = row_ids[elem - 1];
this_row = row_ids[elem];
}
// `num_splits` is the number of splits we have to write, usually 0 or 1
// but in principle unlimited as there could be empty rows. The
// relationship between row_ids and row_splits is more symmetric than
// you might expect.
int32_t num_splits = this_row - prev_row;
const int32_t max_loop = 8; // `max_loop` is heuristically chosen.
if (num_splits / threads_per_elem > max_loop) {
if (thread_this_elem == 0) {
FillValues(row_splits + prev_row + 1, num_splits, elem);
}
} else {
// TODO(dan): figure out how to unroll this?
for (; thread_this_elem < num_splits; thread_this_elem += threads_per_elem)
row_splits[prev_row + 1 + thread_this_elem] = elem;
}
}
// see declaration in utils.h for documentation.
void RowIdsToRowSplits(ContextPtr &c, int32_t num_elems, const int32_t *row_ids,
bool no_empty_rows, int32_t num_rows,
int32_t *row_splits) {
NVTX_RANGE(__func__);
// process corner case first
if (num_elems == 0) {
auto lambda_set_values = [=] __host__ __device__(int32_t i) {
row_splits[i] = 0;
};
Eval(c, num_rows + 1, lambda_set_values);
return;
}
DeviceType d = c->GetDeviceType();
if (d == kCpu) {
int32_t cur_row = -1;
for (int32_t i = 0; i < num_elems; i++) {
int32_t row = row_ids[i];
K2_CHECK_GE(row, cur_row);
while (cur_row < row) {
cur_row++;
row_splits[cur_row] = i;
}
}
// cur_row must be >= 0 here as num_elems > 0
K2_CHECK_GE(cur_row, 0);
while (cur_row < num_rows) {
row_splits[++cur_row] = num_elems;
}
} else {
K2_CHECK_EQ(d, kCuda);
if (no_empty_rows) {
auto lambda_simple = [=] __host__ __device__(int32_t i) {
int32_t this_row = row_ids[i], prev_row;
if (i > 0) {
// (normal case)
prev_row = row_ids[i - 1];
} else {
// i == 0
row_splits[num_rows] = num_elems;
prev_row = -1;
}
K2_CHECK_LE(this_row, prev_row + 1); // no_empty_rows was asserted by
// the user
if (this_row > prev_row) {
row_splits[this_row] = i;
}
};
Eval(c, num_elems, lambda_simple);
return;
} else {
// By doing "+ 2" instead of "+ 1" we increase the minimum number of
// threads-per-elem, which may reduce latency when there are successive
// empty rows. Any value >= 1 is correct though.
int32_t avg_rows_per_elem = num_rows / num_elems + 2,
threads_per_elem = RoundUpToNearestPowerOfTwo(avg_rows_per_elem),
tot_threads =
(num_elems + 1) * threads_per_elem; // +1 for the last row
int32_t block_size = 256;
int32_t grid_size = NumBlocks(tot_threads, block_size);
hipLaunchKernelGGL(( K2_CUDA_SAFE_CALL(RowIdsToRowSplitsKernel), dim3(grid_size), dim3(block_size), 0,
c->GetCudaStream(),
num_elems, threads_per_elem, row_ids, num_rows, row_splits));
}
}
}
/*
Called inside GetTaskRedirect(); see documentation of that in header.
Each task with 0 <= task < num_tasks gets allocated `threads_per_job`
threads, e.g. threads_per_job = 4 or 16. It's a kind of n-ary
search (generalization of binary search) where each branch is handled
by a different thread so they can happen in parallel.
TODO(dan): there are a lot of opportunities to further optimize this
using GPU hardware tricks.
The thread-block size this is called with must be jobs_per_block *
threads_per_job.
*/
/*
template <int32_t jobs_per_block, int32_t threads_per_job>
__global__ void GetTaskRedirect(int32_t num_tasks, const int32_t *row_splits,
TaskRedirect *redirect_out) {
__shared__ int32_t temp[tasks_per_block];
// we do __syncwarp() for synchronization below; we require threads_per_job <=
// 32 for this reason.
static_assert(threads_per_job >= 2 && threads_per_job <= 32);
// We have work to do for 0 <= job_idx < num_tasks, but be careful: job_idx
// may be >= num_tasks if num_tasks is small or not a power of two (we don't
// return because we need to do __syncwarp()). So we have to avoid out of
// bounds memory access.
int32_t job_idx = (blockIdx.x * blockDim.x + threadIdx.x) / threads_per_job;
// `branch_idx` is which member we are of the group of the `threads_per_job`
// threads for this job.
int32_t branch_idx = threadIdx.x % threads_per_job;  // we assume blockDim.x % threads_per_job == 0
// `temp_idx` is which index in the temporary storage `temp` we are assigned
// (one per job).
int32_t temp_idx = threadIdx.x / threads_per_job;
// TODO: we may at some point decide that row_splits[0] has to be zero.
int32_t row_splits0 = row_splits[0],
row_splits_nt = row_splits[num_tasks],
num_items = row_splits_nt - row_splits0;
if (num_items <= 0) {
assert(num_items == 0);
// This is a special case where there is no work to do; we give a trivial
// assignment of tasks to jobs and return
static_assert(threads_per_job >= 2);
if (branch_idx < 2 && job_idx < num_tasks) {
TaskRedirect tr { job_idx, 2, branch_idx };
redirect_out[job_idx + branch_idx * num_tasks] = tr;
}
return;
} else if (branch_idx == 0 && job_idx < num_tasks) {
// This code writes to the jobs in the first half of the output array,
// that are allocated to the same-numbered task.
int32_t task_idx = job_idx,
this_row_split = row_splits[task_idx],
next_row_split = row_splits[task_idx + 1];
// `num_jobs` below is the number of jobs that will be active for
// this task. (The "1 +".. is the job that we assign for each
// task, one job per task, in the "first half" of the jobs).
// the job_idx we're working out below is the job_idx for the
// "second half" of
int32_t num_jobs_this_task =
1 + (next_row_split/dart_separation - this_row_split/dart_separation);
TaskRedirect tr { task_idx, num_jobs_this_task, 0 };
redirect_out[task_idx] = tr;
}
// Now we have the less-trivial task of assigning the jobs in the 2nd half of
// the output array to tasks (these are allocated roughly proportional to the
// amount of work to do for that task).
// We do the selection by throwing darts at a dart-board, evenly spaced, and
// seeing which task they correspond to. There are `num_tasks` darts.
// Note: we know dart_location < row_splits_nt because job_idx < num_tasks and
// because integer division rounds down.
int32_t dart_separation = num_items / num_tasks,
dart_location = row_splits0 + job_idx * dart_separation;
// OK, from this point the goal is to find a task_idx such that
// row_splits[task_idx] <= dart_location < row_splits[task_idx + 1].
// This is guaranteed to exist, as long as job_id < num_tasks.
// As long as job_id < num_tasks, we maintain the property that
// row_splits[lower_bound] <= dart_location &&
// (upper_bound > num_tasks || row_splits[upper_bound] > dart_location).
// (where upper_bound == lower_bound + range), i.e. they are truly
// lower and upper bounds
int32_t lower_bound = 0,
range = num_tasks; // we are responsible for items lower_bound through
// (upper_bound = lower_bound + range) - 1.
while (range > threads_per_job) {
int32_t upper_bound = lower_bound + range;
// We need to narrow the range of `task_idx` that might be the correct one.
// We round *up* because we require that task_idx_step * threads_per_job >=
// range, so that we cover the entire range.
int32_t task_idx_step = (range + threads_per_job - 1) / threads_per_job,  // >= 2
    my_lower_task_idx = lower_bound + branch_idx * task_idx_step,
    my_upper_task_idx = my_lower_task_idx + task_idx_step;
// The following avoids out-of-bounds memory accesses.
if (my_upper_task_idx > upper_bound)
my_upper_task_idx = upper_bound;
// TODO (dan): it may be possible to use one of those special within-warp
// commands involving bitmaps to make the second comparison (dart_location <
// row_splits[my_upper_task_idx]) unnecessary.
if (my_lower_task_idx < num_tasks && row_splits[my_lower_task_idx] <=
dart_location && dart_location < row_splits[my_upper_task_idx]) {
// I am the "chosen branch" (exactly one will be chosen, as long as
// job_idx < num_tasks).
temp[temp_idx] = branch_idx;
}
__syncwarp();
int32_t chosen_branch_idx = temp[temp_idx];
lower_bound = lower_bound + chosen_branch_idx * task_idx_step;
upper_bound = lower_bound + task_idx_step;
range = task_idx_step;
// note, we don't limit upper_bound to be <= num_tasks because we need all
// threads in the block to go around the while loop the same number of
// times. Therefore it's possible that upper_bound > num_tasks.
K2_DASSERT(job_idx >= num_tasks ||
(row_splits[lower_bound] <= dart_location &&
(upper_bound > num_tasks || row_splits[upper_bound] >
dart_location))); // TODO: remove once debugged.
}
int32_t task_idx = lower_bound + branch_idx;
// TODO (dan): it may be possible to use one of those special within-warp
// commands involving bitmaps to make the second comparison (dart_location <
// row_splits[my_upper_task_idx]) unnecessary.
//
// The check `task_idx < num_tasks` is to avoid out-of-bounds access of
// row_splits.
// The check `job_idx < num_tasks` is to avoid out-of-bounds access of
// `redirect_out`;
// for these out-of-range job_idx values, it's possible for task_idx to have
// any value since it may be uninitialized memory.
if (task_idx < num_tasks && job_idx < num_tasks) {
int32_t this_row_split = row_splits[task_idx],
next_row_split = row_splits[task_idx + 1];
if (this_row_split <= dart_location && dart_location < next_row_split) {
// OK, exactly one branch per job will reach this point. `num_jobs` below
// is the number of jobs that will be active for this task. (The "1
// +".. is the job that we assign for each task, one job per task, in the
// "first half" of the jobs). The job_id_this_task we're working out
// below is the job_id within the second half of the TaskRedirects,
// the half that are allocated by throwing darts.
int32_t num_jobs_this_task =
            1 + (next_row_split / dart_separation -
                 this_row_split / dart_separation),
        job_idx_this_task =
            1 + (dart_location - this_row_split) / dart_separation;
K2_CHECK(job_idx_this_task < num_jobs_this_task);
TaskRedirect tr { task_idx, num_jobs_this_task, job_idx_this_task };
redirect_out[num_tasks + job_idx] = tr;
}
}
}
*/
/*
This is a quite simple implementation of GetTaskRedirect... I had a more
complicated one above that had better O(N) performance for hard cases, but
this one will handle more normal/smaller cases better, plus is easier to
debug. The basic idea is to throw lots of threads at it,
i.e. threads_per_task should be, say, twice larger than the average / expected
number of jobs per task, so that if a task has lots of jobs it doesn't have to
loop too many times.
*/
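// Worked example (editor's sketch, not part of the original source): with
// num_tasks = 4 and row_splits = {0, 2, 2, 10, 12} we get num_items = 12 and
// dart_separation = 3.  The formula used below,
//   num_jobs_this_task = 1 + (min(next/3, 4) - min(this/3, 4)),
// then yields 1, 1, 4 and 2 jobs for tasks 0..3 respectively: the large task 2
// (8 of the 12 items) absorbs most of the dart-allocated second half, and in
// total 2 * num_tasks = 8 TaskRedirect entries are written.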
template <int32_t threads_per_task>
__global__ void GetTaskRedirect(int32_t num_tasks, const int32_t *row_splits,
TaskRedirect *redirect_out) {
int32_t thread = blockIdx.x * blockDim.x + threadIdx.x;
int32_t task_idx = thread / threads_per_task;
if (task_idx >= num_tasks) return;
// `thread_idx` is which member we are of the group of the `threads_per_job`
// threads for this job.
int32_t thread_idx = thread % threads_per_task;
int32_t row_splits0 = row_splits[0], row_splits_nt = row_splits[num_tasks],
num_items = row_splits_nt - row_splits0; // the 'num_items' is the
// total amount of work to
// do, that we want to
// distribute fairly evenly.
// The idea with `dart_separation` is this: Half of the jobs we allocate to
// the corresponding tasks. The other half we allocate by throwing darts onto
// the interval [0, num_items - 1], evenly spaced starting from 0, and seeing
// which tasks they land in. This is somewhat random but it ensures that if
// any task has a very large amount of work to do, it will get a roughly
// proportionate number of jobs.
int32_t dart_separation = num_items / num_tasks;
if (dart_separation <= 0) {
// This is a special case where there is no work to do; we give a trivial
// assignment of tasks to jobs and return
static_assert(threads_per_task >= 2, "threads per task must >= 2");
if (thread_idx < 2) {
TaskRedirect tr{task_idx, 2, static_cast<uint16_t>(thread_idx)};
redirect_out[task_idx + thread_idx * num_tasks] = tr;
}
return;
}
// TODO(dan): IDK how well the hardware combines these memory requests; could
// consider loading to shared memory first.
int32_t this_row_split = row_splits[task_idx],
next_row_split = row_splits[task_idx + 1];
// `num_jobs` below is the number of jobs that will be active for
// this task. (The "1 +".. is the job that we assign for each
// task, one job per task, in the "first half" of the jobs).
// the job_idx we're working out below is the job_idx for the
// "second half" of
int32_t num_jobs_this_task =
1 + (min(next_row_split / dart_separation, num_tasks) -
min(this_row_split / dart_separation,
num_tasks)); // function `min` is from cuda
K2_CHECK_EQ(static_cast<int32_t>(static_cast<uint16_t>(num_jobs_this_task)),
num_jobs_this_task);
for (int32_t job_id_this_task = thread_idx;
job_id_this_task < num_jobs_this_task;
job_id_this_task += threads_per_task) {
int32_t job_idx = (job_id_this_task == 0 ? task_idx : // 1st half
num_tasks + (this_row_split / dart_separation) +
job_id_this_task - 1); // 2nd half.
redirect_out[job_idx] =
TaskRedirect{task_idx, static_cast<uint16_t>(num_jobs_this_task),
static_cast<uint16_t>(job_id_this_task)};
}
}
void GetTaskRedirect(hipStream_t stream, int32_t num_tasks,
const int32_t *row_splits, TaskRedirect *redirect_out) {
NVTX_RANGE(__func__);
if (num_tasks <= 0) return;
if (stream == kCudaStreamInvalid) {
// there's not much point in using this on CPU as there are better ways
// to do things (sequentially), but this can be useful for debugging.
// The idea with `dart_separation` is this: Half of the jobs we allocate
// to the corresponding tasks. The other half we allocate by throwing
// darts onto the interval [0, num_items - 1], evenly spaced starting from
// 0, and seeing which tasks they land in. This is somewhat random but it
// ensures that if any task has a very large amount of work to do, it will
// get a roughly proportionate number of jobs.
int32_t row_splits0 = row_splits[0], row_splits_nt = row_splits[num_tasks],
num_items = row_splits_nt - row_splits0,
dart_separation = num_items / num_tasks;
if (dart_separation != 0) {
for (int32_t task = 0; task < num_tasks; ++task) {
int32_t this_row_split = row_splits[task],
next_row_split = row_splits[task + 1];
int32_t num_jobs_this_task =
1 + (std::min(next_row_split / dart_separation, num_tasks) -
     std::min(this_row_split / dart_separation, num_tasks));
K2_CHECK_EQ(
static_cast<int32_t>(static_cast<uint16_t>(num_jobs_this_task)),
num_jobs_this_task);
for (int32_t job_id_this_task = 0;
job_id_this_task < num_jobs_this_task; ++job_id_this_task) {
int32_t job_idx =
(job_id_this_task == 0 ? task : // 1st half
num_tasks + (this_row_split / dart_separation) +
job_id_this_task - 1); // 2nd half.
redirect_out[job_idx] =
TaskRedirect{task, static_cast<uint16_t>(num_jobs_this_task),
static_cast<uint16_t>(job_id_this_task)};
}
}
} else {
// This is a special case where there is no work to do; we give a trivial
// assignment of tasks to jobs and return
for (int32_t task = 0; task < num_tasks; ++task) {
int32_t num_jobs_this_task = 2;
for (int32_t job_id_this_task = 0;
job_id_this_task < num_jobs_this_task; ++job_id_this_task) {
int32_t job_idx = task + job_id_this_task * num_tasks;
redirect_out[job_idx] =
TaskRedirect{task, static_cast<uint16_t>(num_jobs_this_task),
static_cast<uint16_t>(job_id_this_task)};
}
}
}
} else {
// compare 8 to 2, which is the expected number of jobs per task. having
// 8 substantially greater than 2 gives a fairly big safety factor.
// However this is still far from ideal in scenarios where the number of
// tasks might be highly unbalanced.
const int32_t threads_per_task = 8,
tot_threads = threads_per_task * num_tasks;
int32_t block_size = 256;
int32_t grid_size = NumBlocks(tot_threads, block_size);
K2_CUDA_SAFE_CALL(hipLaunchKernelGGL(
    HIP_KERNEL_NAME(GetTaskRedirect<threads_per_task>), dim3(grid_size),
    dim3(block_size), 0, stream, num_tasks, row_splits, redirect_out));
}
}
void GetTaskRedirect(ContextPtr &c, int32_t num_tasks,
const int32_t *row_splits, TaskRedirect *redirect_out) {
GetTaskRedirect(c->GetCudaStream(), num_tasks, row_splits, redirect_out);
}
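// Example call pattern (editor's sketch; the context `c` and the device
// pointers are assumed to come from the caller, this is not code taken from
// the k2 sources):
//
//   // row_splits: num_tasks + 1 int32_t values on the device.
//   // redirect:   room for 2 * num_tasks TaskRedirect entries.
//   GetTaskRedirect(c, num_tasks, row_splits_data, redirect_data);
//
// The 2 * num_tasks sizing follows from the kernel above: one job per task in
// the first half plus up to num_tasks dart-assigned jobs in the second half.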
} // namespace k2
| bb4d8fe037ddd85f5b26d49a6814bcea0444560a.cu | /**
* @brief
* utils
*
* @copyright
* Copyright (c) 2020 Xiaomi Corporation (authors: Daniel Povey, Haowen Qiu)
*
* @copyright
* See LICENSE for clarification regarding multiple authors
*/
#include <algorithm>
#include "k2/csrc/math.h"
#include "k2/csrc/utils.h"
namespace k2 {
// See FillValues() where this is invoked. It fills a region with
// a constant value.
__global__ void FillValuesKernel(int32_t *data, int32_t num_values,
int32_t value) {
int32_t job_idx = (blockIdx.x * blockDim.x + threadIdx.x),
stride = (gridDim.x * blockDim.x);
for (; job_idx < num_values; job_idx += stride) data[job_idx] = value;
}
// This launches a kernel. It's the same as doing:
// for (int32_t i = 0; i < num_values; i++) data[i] = value;
__device__ void FillValues(int32_t *data, int32_t num_values, int32_t value) {
int32_t block_size = 256;
int32_t grid_size = NumBlocks(num_values, block_size);
FillValuesKernel<<<grid_size, block_size>>>(data, num_values, value);
}
// When we invoke this we make a big enough grid that there doesn't have to
// be a loop over rows, i.e. (gridDim.x * blockDim.x) / threads_per_row >=
// num_rows
__global__ void RowSplitsToRowIdsKernel(int32_t num_rows,
int32_t threads_per_row,
const int32_t *row_splits,
int32_t num_elems, int32_t *row_ids) {
int32_t thread = blockIdx.x * blockDim.x + threadIdx.x,
num_threads = gridDim.x * blockDim.x, row = thread / threads_per_row,
thread_this_row = thread % threads_per_row;
if (row >= num_rows) return;
K2_CHECK_GE(num_threads / threads_per_row, num_rows);
int32_t this_row_split = row_splits[row],
next_row_split = row_splits[row + 1],
row_length = next_row_split - this_row_split;
const int32_t max_loop = 8; // `max_loop` is heuristically chosen.
if (row_length / threads_per_row > max_loop) {
// We decide that looping too many times will be too slow, so we launch
// another kernel to fill in the value for this row. (This is CUDA dynamic
// parallelism).
if (thread_this_row == 0) {
FillValues(row_ids + this_row_split, row_length, row);
}
} else {
// TODO(dan): figure out how to unroll this?
for (; thread_this_row < row_length; thread_this_row += threads_per_row)
row_ids[this_row_split + thread_this_row] = row;
}
}
/*
See declaration of RowSplitsToRowIds() in utils.h. These are implementation
notes.
Suppose the range we need to fill with a
particular number (say, x) is from 1010 to 10000 inclusive (binary) The
first kernel writes x to positions 1010, 1100, 10000; the significance of
that sequence is we keep adding the smallest number we can add to get
another zero at the end of the binary representation, until we exceed the
range we're supposed to fill. The second kernel: for a given index into x
that is must fill (say, 1111), it asks "is the index currently here already
the right one?", which it can test using the function is_valid_index()
below; if it's not already correct, it searches in a sequence of positions:
1110, 1100, 1000, 0000, like our sequence above but going downwards, again
getting more zeros at the end of the binary representation, until it finds
the correct value in the array at the searched position; then it copies the
discovered value the original position requested (here, 1111).
First kernel pseudocode: for each index 'i' into 't', it does:
for (int32_t n=0, j = t[i]; j < t[i+1]; n++) {
x[j] = i;
if (j & (1<<n)) j += (1 << n);
}
Second kernel pseudocode: for each element of x, it searches for the right
index. Suppose we're given num_indexes == length(n) == length(t) - 1. Define
is_valid_index as follows:
// returns true if j is the value that we should be putting at position
'i' in x:
// that is, if t[j] <= i < t[j+1].
bool is_valid_index(i, j) {
return (j >= 0 && j < num_indexes && t[j] <= i && i < t[j+1]);
}
// We suppose we are given i (the position into x that we're responsible
for
// setting:
orig_i = i;
for (int32_t n=0; !is_valid_index(i, x[i]); n++) {
if (i & (1<<n)) i -= (1 << n);
}
x[orig_i] = x[i];
*/
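// Editor's illustration of the two phases (assumed example, not from the
// original comment): for row_splits = {0, 3, 5} the first phase writes
// row_ids = {0, -1, -1, 1, 1}: position 0 comes from row 0, position 3 from
// row 1, and position 4 (= 100b) is also written by row 1 because adding 1 to
// 3 (= 011b) clears its trailing one-bits.  The second phase then fills
// positions 1 and 2 by rounding down (1 -> 0, 2 -> 0) to an already-written
// index, giving the final row_ids = {0, 0, 0, 1, 1}.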
void RowSplitsToRowIds(ContextPtr &c, int32_t num_rows,
const int32_t *row_splits, int32_t num_elems,
int32_t *row_ids) {
NVTX_RANGE(__func__);
if (num_rows <= 0 || num_elems <= 0) return;
DeviceType d = c->GetDeviceType();
if (d == kCpu) {
int32_t cur_row_start = row_splits[0];
K2_CHECK_EQ(cur_row_start, 0);
K2_CHECK_EQ(row_splits[num_rows], num_elems);
for (int32_t row = 0; row < num_rows; ++row) {
int32_t next_row_start = row_splits[row + 1];
for (; cur_row_start < next_row_start; ++cur_row_start)
row_ids[cur_row_start] = row;
}
} else {
K2_CHECK_EQ(d, kCuda);
if (1) {
// TODO: compare this for speed with the other branch. This is branch is
// much simpler, and will be considerably faster for "normal" cases ->
// probably preferred.
int32_t avg_elems_per_row = (num_elems + num_rows - 1) / num_rows,
threads_per_row = RoundUpToNearestPowerOfTwo(avg_elems_per_row),
tot_threads = num_rows * threads_per_row;
int32_t block_size = 256;
int32_t grid_size = NumBlocks(tot_threads, block_size);
K2_CUDA_SAFE_CALL(RowSplitsToRowIdsKernel<<<grid_size, block_size, 0,
c->GetCudaStream()>>>(
num_rows, threads_per_row, row_splits, num_elems, row_ids));
} else {
// TODO: Will probably just delete this branch at some point.
// The following algorithm isn't particularly adapted to GPU hardware in
// terms of coalesced reads and writes and so on, but it has reasonable
// asymptotic time complexity (assuming all kernels run in parallel),
// specifically: O(log(largest(row_splits[i+1]-row_splits[i])))
auto lambda_init_minus_one = [=] __host__ __device__(int32_t i) {
row_ids[i] = -1;
};
Eval(c, num_elems + 1, lambda_init_minus_one);
auto lambda_phase_one = [=] __host__ __device__(int32_t i) {
int32_t this_row_split = row_splits[i],
next_row_split =
(i < num_rows ? row_splits[i + 1] : this_row_split + 1);
if (this_row_split < next_row_split) row_ids[this_row_split] = i;
// we have to fill in row_ids[this_row_split],
// row_ids[this_row_split+1]... row_ids[next_row_split-1] with the same
// value but that could be a long loop. Instead we write at
// this_row_split and all indexes this_row_split < i < next_row_split
// such that i is the result of rounding up this_row_split to
// (something)*2^n, for n = 1, 2, 3, ... this will take time logarithmic
// in (next_row_split - this_row_split). we can then fill in the gaps
// with a logarithmic-time loop, by looking for a value that's not (-1)
// by rounding the current index down to successively higher powers
// of 2.
for (int32_t power = 0, j = this_row_split;
j + (1 << power) < next_row_split; power++) {
if (j & (1 << power)) {
j += (1 << power);
// we know that j is now < next_row_split, because we checked "j +
// (1<<power) < next_row_split" in the loop condition.
// Note, we don't want a loop-within-a-loop because of how SIMT
// works...
row_ids[j] = i;
}
}
};
Eval(c, num_elems + 1, lambda_phase_one);
auto lambda_phase_two = [=] __host__ __device__(int32_t j) {
int32_t row_index = row_ids[j];
if (row_index != -1) return;
int32_t power = 0, j2 = j;
for (; row_index != -1; power++) {
if (j2 & (1 << power)) {
j2 -= (1 << power);
row_index = row_ids[j2];
}
assert(power < 31);
}
row_ids[j] = row_ids[j2];
};
// could do the next line for num_elems+1, but the element at `num_elems`
// will already be set.
Eval(c, num_elems, lambda_phase_two);
}
}
}
/*
When we invoke this we make a big enough grid that there doesn't have to
be a loop over elements, i.e. (gridDim.x * blockDim.x) / threads_per_elem >
num_elems. (must be >=, because we imagine a phantom element at [num_elems]
with the value `num_rows`.)
@param [in] num_elems Number of elements in ragged matrix
@param [in] threads_per_elem Number of threads we allocate per element.
Must be >= 1.
@param [in] row_ids The row_ids vector, of length `num_elems`;
must be nonnegative and non-decreasing and
all elements < num_rows.
@param [in] num_rows Number of rows, must be greater than the
largest (== last) element of `row_ids`.
@param [out] row_splits This kernel will output a non-decreasing
vector of length num_rows + 1, such that
row_splits[0] == 0,
row_splits[num_rows] == num_elems,
and row_splits[row_ids[i]] <= i <
row_splits[row_ids[i]+1]
*/
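// Editor's note (assumed example): this is the inverse of RowSplitsToRowIds;
// e.g. row_ids = {0, 0, 0, 1, 1} with num_rows = 2 must produce
// row_splits = {0, 3, 5}, and with a trailing empty row (num_rows = 3) it must
// produce row_splits = {0, 3, 5, 5} -- which is why a single element below may
// have to write several consecutive row_splits entries.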
__global__ void RowIdsToRowSplitsKernel(int32_t num_elems,
int32_t threads_per_elem,
const int32_t *row_ids,
int32_t num_rows, int32_t *row_splits) {
int32_t thread = (blockIdx.x * blockDim.x + threadIdx.x),
num_threads = gridDim.x * blockDim.x,
elem = thread / threads_per_elem,
thread_this_elem = thread % threads_per_elem;
K2_CHECK_GE(num_threads / threads_per_elem, num_elems);
if (elem > num_elems) return;
int32_t this_row, prev_row;
if (elem == 0) {
prev_row = -1;
this_row = row_ids[elem];
} else if (elem == num_elems) {
prev_row = row_ids[elem - 1];
this_row = num_rows;
} else {
prev_row = row_ids[elem - 1];
this_row = row_ids[elem];
}
// `num_splits` is the number of splits we have to write, usually 0 or 1
// but in principle unlimited as there could be empty rows. The
// relationship between row_ids and row_splits is more symmetric than
// you might expect.
int32_t num_splits = this_row - prev_row;
const int32_t max_loop = 8; // `max_loop` is heuristically chosen.
if (num_splits / threads_per_elem > max_loop) {
if (thread_this_elem == 0) {
FillValues(row_splits + prev_row + 1, num_splits, elem);
}
} else {
// TODO(dan): figure out how to unroll this?
for (; thread_this_elem < num_splits; thread_this_elem += threads_per_elem)
row_splits[prev_row + 1 + thread_this_elem] = elem;
}
}
// see declaration in utils.h for documentation.
void RowIdsToRowSplits(ContextPtr &c, int32_t num_elems, const int32_t *row_ids,
bool no_empty_rows, int32_t num_rows,
int32_t *row_splits) {
NVTX_RANGE(__func__);
// process corner case first
if (num_elems == 0) {
auto lambda_set_values = [=] __host__ __device__(int32_t i) {
row_splits[i] = 0;
};
Eval(c, num_rows + 1, lambda_set_values);
return;
}
DeviceType d = c->GetDeviceType();
if (d == kCpu) {
int32_t cur_row = -1;
for (int32_t i = 0; i < num_elems; i++) {
int32_t row = row_ids[i];
K2_CHECK_GE(row, cur_row);
while (cur_row < row) {
cur_row++;
row_splits[cur_row] = i;
}
}
// cur_row must be >= 0 here as num_elems > 0
K2_CHECK_GE(cur_row, 0);
while (cur_row < num_rows) {
row_splits[++cur_row] = num_elems;
}
} else {
K2_CHECK_EQ(d, kCuda);
if (no_empty_rows) {
auto lambda_simple = [=] __host__ __device__(int32_t i) {
int32_t this_row = row_ids[i], prev_row;
if (i > 0) {
// (normal case)
prev_row = row_ids[i - 1];
} else {
// i == 0
row_splits[num_rows] = num_elems;
prev_row = -1;
}
K2_CHECK_LE(this_row, prev_row + 1); // no_empty_rows was asserted by
// the user
if (this_row > prev_row) {
row_splits[this_row] = i;
}
};
Eval(c, num_elems, lambda_simple);
return;
} else {
// By doing "+ 2" instead of "+ 1" we increase the minimum number of
// threads-per-row, which may reduce latency when there are successive
// empty rows. Any value >= 1 is correct though.
int32_t avg_rows_per_elem = num_rows / num_elems + 2,
threads_per_elem = RoundUpToNearestPowerOfTwo(avg_rows_per_elem),
tot_threads =
(num_elems + 1) * threads_per_elem; // +1 for the last row
int32_t block_size = 256;
int32_t grid_size = NumBlocks(tot_threads, block_size);
K2_CUDA_SAFE_CALL(RowIdsToRowSplitsKernel<<<grid_size, block_size, 0,
c->GetCudaStream()>>>(
num_elems, threads_per_elem, row_ids, num_rows, row_splits));
}
}
}
/*
Called inside GetTaskRedirect(); see documentation of that in header.
Each task with 0 <= task < num_tasks gets allocated `threads_per_job`
threads, e.g. threads_per_job = 4 or 16. It's a kind of n-ary
search (generalization of binary search) where each branch is handled
by a different thread so they can happen in parallel.
TODO(dan): there are a lot of opportunities to further optimize this
using GPU hardware tricks.
The thread-block size this is called with must be jobs_per_block *
threads_per_job.
*/
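// Editor's note on the n-ary search (illustrative numbers only): with
// threads_per_job = 4 and num_tasks = 64, `range` in the loop below shrinks
// 64 -> 16 -> 4, i.e. two iterations, after which each of the 4 branch threads
// checks one candidate task directly -- a base-threads_per_job analogue of the
// log2(num_tasks) steps of a binary search.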
/*
template <int32_t jobs_per_block, int32_t threads_per_job>
__global__ void GetTaskRedirect(int32_t num_tasks, const int32_t *row_splits,
TaskRedirect *redirect_out) {
__shared__ int32_t temp[tasks_per_block];
// we do __syncwarp() for synchronization below; we require threads_per_job <=
// 32 for this reason.
static_assert(threads_per_job >= 2 && threads_per_job <= 32);
// We have work to do for 0 <= job_idx < num_tasks, but be careful: job_idx
// may be >= num_tasks if num_tasks is small or not a power of two (we don't
// return because we need to do __syncwarp()). So we have to avoid out of
// bounds memory access.
int32_t job_idx = (blockIdx.x * blockDim.x + threadIdx.x) / threads_per_job;
// `branch_idx` is which member we are of the group of the `threads_per_job`
// threads for this job.  We assume blockDim.x % threads_per_job == 0.
int32_t branch_idx = threadIdx.x % threads_per_job;
// `temp_idx` is which index in the temporary storage `temp` we are assigned
// (one per job).
int32_t temp_idx = threadIdx.x / threads_per_job;
// TODO: we may at some point decide that row_splits[0] has to be zero.
int32_t row_splits0 = row_splits[0],
row_splits_nt = row_splits[num_tasks],
num_items = row_splits_nt - row_splits0;
if (num_items <= 0) {
assert(num_items == 0);
// This is a special case where there is no work to do; we give a trivial
// assignment of tasks to jobs and return
static_assert(threads_per_job >= 2);
if (branch_idx < 2 && job_idx < num_tasks) {
TaskRedirect tr { job_idx, 2, branch_idx };
redirect_out[job_idx + branch_idx * num_tasks] = tr;
}
return;
} else if (branch_idx == 0 && job_idx < num_tasks) {
// This code writes to the jobs in the first half of the output array,
// that are allocated to the same-numbered task.
int32_t task_idx = job_idx,
this_row_split = row_splits[task_idx],
next_row_split = row_splits[task_idx + 1];
// `num_jobs` below is the number of jobs that will be active for
// this task. (The "1 +".. is the job that we assign for each
// task, one job per task, in the "first half" of the jobs).
// the job_idx we're working out below is the job_idx for the
// "second half" of
int32_t num_jobs_this_task =
1 + (next_row_split/dart_separation - this_row_split/dart_separation);
TaskRedirect tr { task_idx, num_jobs_this_task, 0 };
redirect_out[task_idx] = tr;
}
// Now we have the less-trivial task of assigning the jobs in the 2nd half of
// the output array to tasks (these are allocated roughly in proportion to the
// amount of work to do for that task).
// We do the selection by throwing darts at a dart-board, evenly spaced, and
// seeing which task they correspond to.  (There are `num_tasks` darts.)
// Note: we know dart_location < row_splits_nt because job_idx < num_tasks and
// because integer division rounds down.
int32_t dart_separation = num_items / num_tasks,
dart_location = row_splits0 + job_idx * dart_separation;
// OK, from this point the goal is to find a task_idx such that
// row_splits[task_idx] <= dart_location < row_splits[task_idx + 1].
// This is guaranteed to exist, as long as job_id < num_tasks.
// As long as job_id < num_tasks, we maintain the property that
// row_splits[lower_bound] <= dart_location &&
// (upper_bound > num_tasks || row_splits[upper_bound] > dart_location).
// (where upper_bound == lower_bound + range), i.e. they are truly
// lower and upper bounds
int32_t lower_bound = 0,
range = num_tasks; // we are responsible for items lower_bound through
// (upper_bound = lower_bound + range) - 1.
while (range > threads_per_job) {
int32_t upper_bound = lower_bound + range;
// We need to narrow the range of `task_idx` that might be the correct one.
// We round *up* because we require that
// task_idx_step * threads_per_job >= range, so that we cover the entire range.
int32_t task_idx_step = (range + threads_per_job - 1) / threads_per_job,  // >= 2
        my_lower_task_idx = lower_bound + branch_idx * task_idx_step,
        my_upper_task_idx = my_lower_task_idx + task_idx_step;
// The following avoids out-of-bounds memory accesses.
if (my_upper_task_idx > upper_bound)
my_upper_task_idx = upper_bound;
// TODO (dan): it may be possible to use one of those special within-warp
// commands involving bitmaps to make the second comparison (dart_location <
// row_splits[my_upper_task_idx]) unnecessary.
if (my_lower_task_idx < num_tasks && row_splits[my_lower_task_idx] <=
dart_location && dart_location < row_splits[my_upper_task_idx]) {
// I am the "chosen branch" (exactly one will be chosen, as long as
// job_idx < num_tasks).
temp[temp_idx] = branch_idx;
}
__syncwarp();
int32_t chosen_branch_idx = temp[temp_idx];
lower_bound = lower_bound + chosen_branch_idx * task_idx_step;
upper_bound = lower_bound + task_idx_step;
range = task_idx_step;
// note, we don't limit upper_bound to be <= num_tasks because we need all
// threads in the block to go around the while loop the same number of
// times. Therefore it's possible that upper_bound > num_tasks.
K2_DASSERT(job_idx >= num_tasks ||
(row_splits[lower_bound] <= dart_location &&
(upper_bound > num_tasks || row_splits[upper_bound] >
dart_location))); // TODO: remove once debugged.
}
int32_t task_idx = lower_bound + branch_idx;
// TODO (dan): it may be possible to use one of those special within-warp
// commands involving bitmaps to make the second comparison (dart_location <
// row_splits[my_upper_task_idx]) unnecessary.
//
// The check `task_idx < num_tasks` is to avoid out-of-bounds access of
// row_splits.
// The check `job_idx < num_tasks` is to avoid out-of-bounds access of
// `redirect_out`;
// for these out-of-range job_idx values, it's possible for task_idx to have
// any value since it may be uninitialized memory.
if (task_idx < num_tasks && job_idx < num_tasks) {
int32_t this_row_split = row_splits[task_idx],
next_row_split = row_splits[task_idx + 1];
if (this_row_split <= dart_location && dart_location < next_row_split) {
// OK, exactly one branch per job will reach this point. `num_jobs` below
// is the number of jobs that will be active for this task. (The "1
// +".. is the job that we assign for each task, one job per task, in the
// "first half" of the jobs). The job_id_this_task we're working out
// below is the job_id within the second half of the TaskRedirects,
// the half that are allocated by throwing darts.
int32_t num_jobs_this_task =
            1 + (next_row_split / dart_separation -
                 this_row_split / dart_separation),
        job_idx_this_task =
            1 + (dart_location - this_row_split) / dart_separation;
K2_CHECK(job_idx_this_task < num_jobs_this_task);
TaskRedirect tr { task_idx, num_jobs_this_task, job_idx_this_task };
redirect_out[num_tasks + job_idx] = tr;
}
}
}
*/
/*
This is a quite simple implementation of GetTaskRedirect... I had a more
complicated one above that had better O(N) performance for hard cases, but
this one will handle more normal/smaller cases better, plus is easier to
debug. The basic idea is to throw lots of threads at it,
i.e. threads_per_task should be, say, twice larger than the average / expected
number of jobs per task, so that if a task has lots of jobs it doesn't have to
loop too many times.
*/
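// Editor's note (layout inferred from the kernel below, not stated in the
// original comment): redirect_out is assumed to hold 2 * num_tasks entries.
// Slot t of the first half always describes task t itself; slot num_tasks + k
// of the second half is written by whichever task's range
// (this_row_split, next_row_split] contains the dart at item offset
// (k + 1) * dart_separation.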
template <int32_t threads_per_task>
__global__ void GetTaskRedirect(int32_t num_tasks, const int32_t *row_splits,
TaskRedirect *redirect_out) {
int32_t thread = blockIdx.x * blockDim.x + threadIdx.x;
int32_t task_idx = thread / threads_per_task;
if (task_idx >= num_tasks) return;
// `thread_idx` is which member we are of the group of the `threads_per_job`
// threads for this job.
int32_t thread_idx = thread % threads_per_task;
int32_t row_splits0 = row_splits[0], row_splits_nt = row_splits[num_tasks],
num_items = row_splits_nt - row_splits0; // the 'num_items' is the
// total amount of work to
// do, that we want to
// distribute fairly evenly.
// The idea with `dart_separation` is this: Half of the jobs we allocate to
// the corresponding tasks. The other half we allocate by throwing darts onto
// the interval [0, num_items - 1], evenly spaced starting from 0, and seeing
// which tasks they land in. This is somewhat random but it ensures that if
// any task has a very large amount of work to do, it will get a roughly
// proportionate number of jobs.
int32_t dart_separation = num_items / num_tasks;
if (dart_separation <= 0) {
// This is a special case where there is no work to do; we give a trivial
// assignment of tasks to jobs and return
static_assert(threads_per_task >= 2, "threads per task must >= 2");
if (thread_idx < 2) {
TaskRedirect tr{task_idx, 2, static_cast<uint16_t>(thread_idx)};
redirect_out[task_idx + thread_idx * num_tasks] = tr;
}
return;
}
// TODO(dan): IDK how well the hardware combines these memory requests; could
// consider loading to shared memory first.
int32_t this_row_split = row_splits[task_idx],
next_row_split = row_splits[task_idx + 1];
// `num_jobs` below is the number of jobs that will be active for
// this task. (The "1 +".. is the job that we assign for each
// task, one job per task, in the "first half" of the jobs).
// the job_idx we're working out below is the job_idx for the
// "second half" of
int32_t num_jobs_this_task =
1 + (min(next_row_split / dart_separation, num_tasks) -
min(this_row_split / dart_separation,
num_tasks)); // function `min` is from cuda
K2_CHECK_EQ(static_cast<int32_t>(static_cast<uint16_t>(num_jobs_this_task)),
num_jobs_this_task);
for (int32_t job_id_this_task = thread_idx;
job_id_this_task < num_jobs_this_task;
job_id_this_task += threads_per_task) {
int32_t job_idx = (job_id_this_task == 0 ? task_idx : // 1st half
num_tasks + (this_row_split / dart_separation) +
job_id_this_task - 1); // 2nd half.
redirect_out[job_idx] =
TaskRedirect{task_idx, static_cast<uint16_t>(num_jobs_this_task),
static_cast<uint16_t>(job_id_this_task)};
}
}
void GetTaskRedirect(cudaStream_t stream, int32_t num_tasks,
const int32_t *row_splits, TaskRedirect *redirect_out) {
NVTX_RANGE(__func__);
if (num_tasks <= 0) return;
if (stream == kCudaStreamInvalid) {
// there's not much point in using this on CPU as there are better ways
// to do things (sequentially), but this can be useful for debugging.
// The idea with `dart_separation` is this: Half of the jobs we allocate
// to the corresponding tasks. The other half we allocate by throwing
// darts onto the interval [0, num_items - 1], evenly spaced starting from
// 0, and seeing which tasks they land in. This is somewhat random but it
// ensures that if any task has a very large amount of work to do, it will
// get a roughly proportionate number of jobs.
int32_t row_splits0 = row_splits[0], row_splits_nt = row_splits[num_tasks],
num_items = row_splits_nt - row_splits0,
dart_separation = num_items / num_tasks;
if (dart_separation != 0) {
for (int32_t task = 0; task < num_tasks; ++task) {
int32_t this_row_split = row_splits[task],
next_row_split = row_splits[task + 1];
int32_t num_jobs_this_task =
1 + (std::min(next_row_split / dart_separation, num_tasks) -
std::min(this_row_split / dart_separation, num_tasks));
K2_CHECK_EQ(
static_cast<int32_t>(static_cast<uint16_t>(num_jobs_this_task)),
num_jobs_this_task);
for (int32_t job_id_this_task = 0;
job_id_this_task < num_jobs_this_task; ++job_id_this_task) {
int32_t job_idx =
(job_id_this_task == 0 ? task : // 1st half
num_tasks + (this_row_split / dart_separation) +
job_id_this_task - 1); // 2nd half.
redirect_out[job_idx] =
TaskRedirect{task, static_cast<uint16_t>(num_jobs_this_task),
static_cast<uint16_t>(job_id_this_task)};
}
}
} else {
// This is a special case where there is no work to do; we give a trivial
// assignment of tasks to jobs and return
for (int32_t task = 0; task < num_tasks; ++task) {
int32_t num_jobs_this_task = 2;
for (int32_t job_id_this_task = 0;
job_id_this_task < num_jobs_this_task; ++job_id_this_task) {
int32_t job_idx = task + job_id_this_task * num_tasks;
redirect_out[job_idx] =
TaskRedirect{task, static_cast<uint16_t>(num_jobs_this_task),
static_cast<uint16_t>(job_id_this_task)};
}
}
}
} else {
// compare 8 to 2, which is the expected number of jobs per task. having
// 8 substantially greater than 2 gives a fairly big safety factor.
// However this is still far from ideal in scenarios where the number of
// tasks might be highly unbalanced.
const int32_t threads_per_task = 8,
tot_threads = threads_per_task * num_tasks;
int32_t block_size = 256;
int32_t grid_size = NumBlocks(tot_threads, block_size);
K2_CUDA_SAFE_CALL(GetTaskRedirect<threads_per_task>
<<<grid_size, block_size, 0, stream>>>(
num_tasks, row_splits, redirect_out));
}
}
void GetTaskRedirect(ContextPtr &c, int32_t num_tasks,
const int32_t *row_splits, TaskRedirect *redirect_out) {
GetTaskRedirect(c->GetCudaStream(), num_tasks, row_splits, redirect_out);
}
} // namespace k2
|
84e631699f25cf6b2fd4d4d9b53ec32216f4206a.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "Solve_redblack2_Kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *output = NULL;
hipMalloc(&output, XSIZE*YSIZE);
const float *input = NULL;
hipMalloc(&input, XSIZE*YSIZE);
int width = XSIZE;
int height = YSIZE;
int nChannels = 1;
int c = 2;
const float *weightx = NULL;
hipMalloc(&weightx, XSIZE*YSIZE);
const float *weighty = NULL;
hipMalloc(&weighty, XSIZE*YSIZE);
float lambda = 1;
float omega = 1;
bool redflag = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(Solve_redblack2_Kernel, dim3(gridBlock), dim3(threadBlock), 0, 0,
                   output, input, width, height, nChannels, c, weightx, weighty,
                   lambda, omega, redflag);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
  hipLaunchKernelGGL(Solve_redblack2_Kernel, dim3(gridBlock), dim3(threadBlock), 0, 0,
                     output, input, width, height, nChannels, c, weightx, weighty,
                     lambda, omega, redflag);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
  hipLaunchKernelGGL(Solve_redblack2_Kernel, dim3(gridBlock), dim3(threadBlock), 0, 0,
                     output, input, width, height, nChannels, c, weightx, weighty,
                     lambda, omega, redflag);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 84e631699f25cf6b2fd4d4d9b53ec32216f4206a.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "Solve_redblack2_Kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *output = NULL;
cudaMalloc(&output, XSIZE*YSIZE);
const float *input = NULL;
cudaMalloc(&input, XSIZE*YSIZE);
int width = XSIZE;
int height = YSIZE;
int nChannels = 1;
int c = 2;
const float *weightx = NULL;
cudaMalloc(&weightx, XSIZE*YSIZE);
const float *weighty = NULL;
cudaMalloc(&weighty, XSIZE*YSIZE);
float lambda = 1;
float omega = 1;
bool redflag = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
Solve_redblack2_Kernel<<<gridBlock,threadBlock>>>(output,input,width,height,nChannels,c,weightx,weighty,lambda,omega,redflag);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
Solve_redblack2_Kernel<<<gridBlock,threadBlock>>>(output,input,width,height,nChannels,c,weightx,weighty,lambda,omega,redflag);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
Solve_redblack2_Kernel<<<gridBlock,threadBlock>>>(output,input,width,height,nChannels,c,weightx,weighty,lambda,omega,redflag);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
68a5fcb5d9dc32d8fe15f8473cd2cb24341e9b70.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "kernel.hip"
#include <cutil_inline.h>
// kernels transpose/copy a tile of TILE_DIM x TILE_DIM elements
// using a TILE_DIM x BLOCK_ROWS thread block, so that each thread
// transposes TILE_DIM/BLOCK_ROWS elements. TILE_DIM must be an
// integral multiple of BLOCK_ROWS
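// For example (editor's note, values purely illustrative): with TILE_DIM = 32
// and BLOCK_ROWS = 8 a 32x8 thread block covers a 32x32 tile, so each thread
// copies/transposes 32/8 = 4 elements.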
// Number of repetitions used for timing.
#define NUM_REPS 100
//extern "C" void (*computeTransposeGold)(float* transposeGold, float *h_idata, int size_x,int size_y);
void computeTransposeGold( float* transposeGold, float* h_idata,
const int size_x, const int size_y )
{
// transpose matrix
for( int y = 0; y < size_y; ++y)
{
for( int x = 0; x < size_x; ++x)
{
transposeGold[(x * size_y) + y] = h_idata[(y * size_x) + x];
}
}
}
int
main( int argc, char** argv)
{
/* printf("Enter the size_x and size_y\n");
scanf("%d,%d", &size_x,size_y);*/
// set matrix size
//int size_x = 1024 , size_y = 1024;
int size_x = 0, size_y = 0;
int temp_BLOCK_ROWS = 0;
if(argc > 1)
{
size_x = atoi(argv[1]);
size_y = atoi(argv[2]);
//temp_TILE_DIM = atoi(argv[3]);
temp_BLOCK_ROWS = atoi(argv[3]);
}
//const int TILE_DIM = temp_TILE_DIM;
const int BLOCK_ROWS = temp_BLOCK_ROWS;
// kernel pointer and descriptor
void (*kernel)(float *, float *,int,int,int,const int);
char *kernelName;
const int mem_size = sizeof(float) * size_x*size_y;
// allocate host memory
float *h_idata = (float*) malloc(mem_size);
float *h_odata = (float*) malloc(mem_size);
float *transposeGold = (float *) malloc(mem_size);
float *gold;
// float *compareF(float *,float *,const int);
// execution configuration parameters
dim3 grid(size_x/TILE_DIM, size_y/TILE_DIM),
threads(TILE_DIM,BLOCK_ROWS);
// CUDA events
hipEvent_t start, stop;
// size of memory required to store the matrix
// allocate device memory
float *d_idata, *d_odata;
hipMalloc( (void**) &d_idata, mem_size);
hipMalloc( (void**) &d_odata, mem_size);
// initalize host data computeTransposeGold
for(int i = 0; i < (size_x*size_y); ++i)
h_idata[i] = (float) i;
// copy host data to device
hipMemcpy(d_idata, h_idata, mem_size,
hipMemcpyHostToDevice );
// Compute reference transpose solution
computeTransposeGold(transposeGold, h_idata, size_x, size_y);
// print out common data for all kernels
printf("\nMatrix size: %dx%d, tile: %dx%d, block: %dx%d\n\n",
size_x, size_y, TILE_DIM, TILE_DIM, TILE_DIM, BLOCK_ROWS);
printf("Kernel\t\t\tLoop over kernel\tLoop within kernel\tLoop over kernel Time\t\tLoop within kernel Time\n");
printf("------\t\t\t----------------\t------------------\t---------------------\t\t-----------------------\n");
//
// loop over different kernels
//
for (int k = 0; k<8; k++) {
// set kernel pointer
switch (k) {
case 0:
kernel = ©
kernelName = "simple copy "; break;
case 1:
kernel = ©SharedMem;
kernelName = "shared memory copy "; break;
case 2:
kernel = &transposeCoalesced;
kernelName = "coalesced transpose "; break;
case 3:
kernel = &transposeNaive;
kernelName = "naive transpose "; break;
case 4:
kernel = &transposeNoBankConflicts;
kernelName = "no bank conflict trans"; break;
case 5:
kernel = &transposeCoarseGrained;
kernelName = "coarse-grained "; break;
case 6:
kernel = &transposeFineGrained;
kernelName = "fine-grained "; break;
case 7:
kernel = &transposeDiagonal;
kernelName = "diagonal transpose "; break;
}
// set reference solution
// NB: fine- and coarse-grained kernels are not full
// transposes, so bypass check
if (kernel == © || kernel == ©SharedMem) {
gold = h_idata;
} else if (kernel == &transposeCoarseGrained ||
kernel == &transposeFineGrained) {
gold = h_odata;
} else {
gold = transposeGold;
}
// initialize events, EC parameters
hipEventCreate(&start);
hipEventCreate(&stop);
// warmup to avoid timing startup
hipLaunchKernelGGL(( kernel), dim3(grid), dim3(threads), 0, 0, d_odata, d_idata, size_x,size_y, 1, BLOCK_ROWS);
// take measurements for loop over kernel launches
hipEventRecord(start, 0);
for (int i=0; i < NUM_REPS; i++) {
hipLaunchKernelGGL(( kernel), dim3(grid), dim3(threads), 0, 0, d_odata, d_idata,size_x,size_y,1, BLOCK_ROWS);
}
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
float outerTime;
hipEventElapsedTime(&outerTime, start, stop);
hipMemcpy(h_odata,d_odata, mem_size, hipMemcpyDeviceToHost);
CUTBoolean res = cutComparef(gold, h_odata, size_x*size_y);
if (res != 1)
printf("*** %s kernel FAILED ***\n", kernelName);
else
printf("***Loop over kernel test PASSED***\n");
// take measurements for loop inside kernel
hipEventRecord(start, 0);
hipLaunchKernelGGL(( kernel), dim3(grid),dim3(threads), 0, 0,
d_odata, d_idata, size_x, size_y, NUM_REPS, BLOCK_ROWS);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
float innerTime;
hipEventElapsedTime(&innerTime, start, stop);
hipMemcpy(h_odata,d_odata, mem_size, hipMemcpyDeviceToHost);
res = cutComparef(gold, h_odata, size_x*size_y);
if (res != 1)
printf("*** %s kernel FAILED ***\n", kernelName);
else
printf("***Loop over kernel test PASSED***\n");
// report effective bandwidths
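// Editor's note on the formula below: each kernel reads and writes the whole
// matrix once (hence 2.0f * mem_size), the elapsed time is in milliseconds and
// averaged over NUM_REPS launches (hence the 1000.0f and /NUM_REPS), and the
// division by 1024^3 converts bytes to the GB units printed -- giving GB/s.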
float outerBandwidth =
2.0f*1000.0f*mem_size/(1024*1024*1024)/(outerTime/NUM_REPS);
float innerBandwidth =
2.0f*1000.0f*mem_size/(1024*1024*1024)/(innerTime/NUM_REPS);
float loopOuterTime = outerTime/(NUM_REPS);
float loopInnerTime = innerTime/(NUM_REPS);
printf("%s\t%5.2f GB/s\t\t%5.2f GB/s\t\t%.5f ms\t\t\t%.5f ms\n",
kernelName, outerBandwidth, innerBandwidth,loopOuterTime, loopInnerTime);
}
// cleanup
free(h_idata); free(h_odata); free(transposeGold);
hipFree(d_idata); hipFree(d_odata);
hipEventDestroy(start); hipEventDestroy(stop);
return 0;
} | 68a5fcb5d9dc32d8fe15f8473cd2cb24341e9b70.cu | #include <stdio.h>
#include "kernel.cu"
#include <cutil_inline.h>
// kernels transpose/copy a tile of TILE_DIM x TILE_DIM elements
// using a TILE_DIM x BLOCK_ROWS thread block, so that each thread
// transposes TILE_DIM/BLOCK_ROWS elements. TILE_DIM must be an
// integral multiple of BLOCK_ROWS
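// Editor's note (inferred from the launch configuration below): the grid is
// built as dim3(size_x/TILE_DIM, size_y/TILE_DIM), so size_x and size_y are
// also assumed to be integral multiples of TILE_DIM; otherwise the trailing
// partial tiles would simply not be processed.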
// Number of repetitions used for timing.
#define NUM_REPS 100
//extern "C" void (*computeTransposeGold)(float* transposeGold, float *h_idata, int size_x,int size_y);
void computeTransposeGold( float* transposeGold, float* h_idata,
const int size_x, const int size_y )
{
// transpose matrix
for( int y = 0; y < size_y; ++y)
{
for( int x = 0; x < size_x; ++x)
{
transposeGold[(x * size_y) + y] = h_idata[(y * size_x) + x];
}
}
}
int
main( int argc, char** argv)
{
/* printf("Enter the size_x and size_y\n");
scanf("%d,%d", &size_x,size_y);*/
// set matrix size
//int size_x = 1024 , size_y = 1024;
int size_x = 0, size_y = 0;
int temp_BLOCK_ROWS = 0;
if(argc > 1)
{
size_x = atoi(argv[1]);
size_y = atoi(argv[2]);
//temp_TILE_DIM = atoi(argv[3]);
temp_BLOCK_ROWS = atoi(argv[3]);
}
//const int TILE_DIM = temp_TILE_DIM;
const int BLOCK_ROWS = temp_BLOCK_ROWS;
// kernel pointer and descriptor
void (*kernel)(float *, float *,int,int,int,const int);
char *kernelName;
const int mem_size = sizeof(float) * size_x*size_y;
// allocate host memory
float *h_idata = (float*) malloc(mem_size);
float *h_odata = (float*) malloc(mem_size);
float *transposeGold = (float *) malloc(mem_size);
float *gold;
// float *compareF(float *,float *,const int);
// execution configuration parameters
dim3 grid(size_x/TILE_DIM, size_y/TILE_DIM),
threads(TILE_DIM,BLOCK_ROWS);
// CUDA events
cudaEvent_t start, stop;
// size of memory required to store the matrix
// allocate device memory
float *d_idata, *d_odata;
cudaMalloc( (void**) &d_idata, mem_size);
cudaMalloc( (void**) &d_odata, mem_size);
// initalize host data computeTransposeGold
for(int i = 0; i < (size_x*size_y); ++i)
h_idata[i] = (float) i;
// copy host data to device
cudaMemcpy(d_idata, h_idata, mem_size,
cudaMemcpyHostToDevice );
// Compute reference transpose solution
computeTransposeGold(transposeGold, h_idata, size_x, size_y);
// print out common data for all kernels
printf("\nMatrix size: %dx%d, tile: %dx%d, block: %dx%d\n\n",
size_x, size_y, TILE_DIM, TILE_DIM, TILE_DIM, BLOCK_ROWS);
printf("Kernel\t\t\tLoop over kernel\tLoop within kernel\tLoop over kernel Time\t\tLoop within kernel Time\n");
printf("------\t\t\t----------------\t------------------\t---------------------\t\t-----------------------\n");
//
// loop over different kernels
//
for (int k = 0; k<8; k++) {
// set kernel pointer
switch (k) {
case 0:
kernel = ©
kernelName = "simple copy "; break;
case 1:
kernel = ©SharedMem;
kernelName = "shared memory copy "; break;
case 2:
kernel = &transposeCoalesced;
kernelName = "coalesced transpose "; break;
case 3:
kernel = &transposeNaive;
kernelName = "naive transpose "; break;
case 4:
kernel = &transposeNoBankConflicts;
kernelName = "no bank conflict trans"; break;
case 5:
kernel = &transposeCoarseGrained;
kernelName = "coarse-grained "; break;
case 6:
kernel = &transposeFineGrained;
kernelName = "fine-grained "; break;
case 7:
kernel = &transposeDiagonal;
kernelName = "diagonal transpose "; break;
}
// set reference solution
// NB: fine- and coarse-grained kernels are not full
// transposes, so bypass check
if (kernel == © || kernel == ©SharedMem) {
gold = h_idata;
} else if (kernel == &transposeCoarseGrained ||
kernel == &transposeFineGrained) {
gold = h_odata;
} else {
gold = transposeGold;
}
// initialize events, EC parameters
cudaEventCreate(&start);
cudaEventCreate(&stop);
// warmup to avoid timing startup
kernel<<<grid, threads>>>(d_odata, d_idata, size_x,size_y, 1, BLOCK_ROWS);
// take measurements for loop over kernel launches
cudaEventRecord(start, 0);
for (int i=0; i < NUM_REPS; i++) {
kernel<<<grid, threads>>>(d_odata, d_idata,size_x,size_y,1, BLOCK_ROWS);
}
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float outerTime;
cudaEventElapsedTime(&outerTime, start, stop);
cudaMemcpy(h_odata,d_odata, mem_size, cudaMemcpyDeviceToHost);
CUTBoolean res = cutComparef(gold, h_odata, size_x*size_y);
if (res != 1)
printf("*** %s kernel FAILED ***\n", kernelName);
else
printf("***Loop over kernel test PASSED***\n");
// take measurements for loop inside kernel
cudaEventRecord(start, 0);
kernel<<<grid,threads>>>
(d_odata, d_idata, size_x, size_y, NUM_REPS, BLOCK_ROWS);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float innerTime;
cudaEventElapsedTime(&innerTime, start, stop);
cudaMemcpy(h_odata,d_odata, mem_size, cudaMemcpyDeviceToHost);
res = cutComparef(gold, h_odata, size_x*size_y);
if (res != 1)
printf("*** %s kernel FAILED ***\n", kernelName);
else
printf("***Loop over kernel test PASSED***\n");
// report effective bandwidths
float outerBandwidth =
2.0f*1000.0f*mem_size/(1024*1024*1024)/(outerTime/NUM_REPS);
float innerBandwidth =
2.0f*1000.0f*mem_size/(1024*1024*1024)/(innerTime/NUM_REPS);
float loopOuterTime = outerTime/(NUM_REPS);
float loopInnerTime = innerTime/(NUM_REPS);
printf("%s\t%5.2f GB/s\t\t%5.2f GB/s\t\t%.5f ms\t\t\t%.5f ms\n",
kernelName, outerBandwidth, innerBandwidth,loopOuterTime, loopInnerTime);
}
// cleanup
free(h_idata); free(h_odata); free(transposeGold);
cudaFree(d_idata); cudaFree(d_odata);
cudaEventDestroy(start); cudaEventDestroy(stop);
return 0;
} |
b63e203e510f6ecf19a93edd4f11813301ad6e46.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <gtest/gtest.h>
#include <algorithm>
#include <iostream>
#ifdef _WIN32
#include <numeric>
#endif
#include <random>
#define PADDLE_CUDA_FP16
#include "paddle/fluid/platform/cuda_device_function.h"
#include "paddle/fluid/platform/cuda_primitives.h"
#include "paddle/fluid/platform/float16.h"
#include "paddle/fluid/platform/cuda_helper.h"
using paddle::platform::PADDLE_CUDA_NUM_THREADS;
using paddle::platform::float16;
template <typename T>
__global__ void AddKernel(const T* data_a, T* data_b, size_t num) {
CUDA_KERNEL_LOOP(i, num) {
paddle::platform::CudaAtomicAdd(&data_b[i], data_a[i]);
}
}
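// Editor's note: CUDA_KERNEL_LOOP is Paddle's grid-stride-loop helper, roughly
//   for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (num);
//        i += blockDim.x * gridDim.x)
// so the kernel above stays correct even when launched with a single block, as
// TestCase() below does.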
template <typename T>
struct AddFunctor {
T operator()(const T& a, const T& b) { return a + b; }
};
template <typename T>
void TestCase(size_t num) {
T *in1, *in2, *out;
T *d_in1, *d_in2;
size_t size = sizeof(T) * num;
#ifdef PADDLE_WITH_HIP
hipMalloc(reinterpret_cast<void**>(&d_in1), size);
hipMalloc(reinterpret_cast<void**>(&d_in2), size);
#else
hipMalloc(reinterpret_cast<void**>(&d_in1), size);
hipMalloc(reinterpret_cast<void**>(&d_in2), size);
#endif
in1 = reinterpret_cast<T*>(malloc(size));
in2 = reinterpret_cast<T*>(malloc(size));
out = reinterpret_cast<T*>(malloc(size));
std::minstd_rand engine;
std::uniform_real_distribution<double> dist(0.0, 1.0);
for (size_t i = 0; i < num; ++i) {
in1[i] = static_cast<T>(dist(engine));
in2[i] = static_cast<T>(dist(engine));
}
#ifdef PADDLE_WITH_HIP
hipMemcpy(d_in1, in1, size, hipMemcpyHostToDevice);
hipMemcpy(d_in2, in2, size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(HIP_KERNEL_NAME(AddKernel<T>), dim3(1),
dim3(PADDLE_CUDA_NUM_THREADS), 0, 0, d_in1, d_in2, num);
hipDeviceSynchronize();
hipMemcpy(out, d_in2, size, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
#else
hipMemcpy(d_in1, in1, size, hipMemcpyHostToDevice);
hipMemcpy(d_in2, in2, size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( AddKernel<T>), dim3(1), dim3(PADDLE_CUDA_NUM_THREADS), 0, 0, d_in1, d_in2, num);
hipDeviceSynchronize();
hipMemcpy(out, d_in2, size, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
#endif
for (size_t i = 0; i < num; ++i) {
// NOTE(dzhwinter): the float16 add has small underflow/overflow
// so we use EXPECT_NEAR to check the result.
EXPECT_NEAR(static_cast<float>(out[i]),
static_cast<float>(AddFunctor<T>()(in1[i], in2[i])), 0.001);
}
free(in1);
free(in2);
free(out);
#ifdef PADDLE_WITH_HIP
hipFree(d_in1);
hipFree(d_in2);
#else
hipFree(d_in1);
hipFree(d_in2);
#endif
}
// cuda primitives
TEST(CudaAtomic, Add) {
TestCase<float>(static_cast<size_t>(10));
TestCase<float>(static_cast<size_t>(1024 * 1024));
TestCase<double>(static_cast<size_t>(10));
TestCase<double>(static_cast<size_t>(1024 * 1024));
}
TEST(CudaAtomic, float16) {
TestCase<float16>(static_cast<size_t>(1));
TestCase<float16>(static_cast<size_t>(2));
TestCase<float16>(static_cast<size_t>(3));
TestCase<float16>(static_cast<size_t>(10));
TestCase<float16>(static_cast<size_t>(1024 * 1024));
}
// unalignment of uint8
void TestUnalign(size_t num, const int shift_bit) {
ASSERT_EQ(num % 2, 0);
float16 *in1, *in2, *out;
float16 *d_in1, *d_in2;
size_t size = sizeof(uint8_t) * (num + shift_bit);
size_t array_size = sizeof(float16) * (num / 2);
#ifdef PADDLE_WITH_HIP
hipMalloc(reinterpret_cast<void**>(&d_in1), size);
hipMalloc(reinterpret_cast<void**>(&d_in2), size);
#else
hipMalloc(reinterpret_cast<void**>(&d_in1), size);
hipMalloc(reinterpret_cast<void**>(&d_in2), size);
#endif
in1 = reinterpret_cast<float16*>(malloc(size));
in2 = reinterpret_cast<float16*>(malloc(size));
out = reinterpret_cast<float16*>(malloc(size));
// right shift 1, mimic the unalignment of address
float16* r_in1 =
reinterpret_cast<float16*>(reinterpret_cast<uint8_t*>(in1) + shift_bit);
float16* r_in2 =
reinterpret_cast<float16*>(reinterpret_cast<uint8_t*>(in2) + shift_bit);
std::minstd_rand engine;
std::uniform_real_distribution<double> dist(0.0, 1.0);
for (size_t i = 0; i < num / 2; ++i) {
r_in1[i] = static_cast<float16>(dist(engine));
r_in2[i] = static_cast<float16>(dist(engine));
}
#ifdef PADDLE_WITH_HIP
hipMemcpy(d_in1, r_in1, array_size, hipMemcpyHostToDevice);
hipMemcpy(d_in2, r_in2, array_size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(HIP_KERNEL_NAME(AddKernel<float16>), dim3(1),
dim3(PADDLE_CUDA_NUM_THREADS), 0, 0, d_in1, d_in2,
num / 2);
hipDeviceSynchronize();
hipMemcpy(out, d_in2, array_size, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
#else
hipMemcpy(d_in1, r_in1, array_size, hipMemcpyHostToDevice);
hipMemcpy(d_in2, r_in2, array_size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( AddKernel<float16>), dim3(1), dim3(PADDLE_CUDA_NUM_THREADS), 0, 0, d_in1, d_in2, num / 2);
hipDeviceSynchronize();
hipMemcpy(out, d_in2, array_size, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
#endif
for (size_t i = 0; i < num / 2; ++i) {
// NOTE(dzhwinter): the float16 add has small truncate error.
// so we use EXPECT_NEAR to check the result.
EXPECT_NEAR(static_cast<float>(out[i]),
static_cast<float>(AddFunctor<float16>()(r_in1[i], r_in2[i])),
0.001);
}
free(in1);
free(in2);
free(out);
#ifdef PADDLE_WITH_HIP
hipFree(d_in1);
hipFree(d_in2);
#else
hipFree(d_in1);
hipFree(d_in2);
#endif
}
TEST(CudaAtomic, float16Unalign) {
// same with float16 testcase
TestUnalign(static_cast<size_t>(2), /*shift_bit*/ 2);
TestUnalign(static_cast<size_t>(1024), /*shift_bit*/ 2);
TestUnalign(static_cast<size_t>(1024 * 1024), /*shift_bit*/ 2);
// shift the address.
TestUnalign(static_cast<size_t>(2), /*shift_bit*/ 1);
TestUnalign(static_cast<size_t>(1024), /*shift_bit*/ 1);
TestUnalign(static_cast<size_t>(1024 * 1024), /*shift_bit*/ 1);
TestUnalign(static_cast<size_t>(2), /*shift_bit*/ 3);
TestUnalign(static_cast<size_t>(1024), /*shift_bit*/ 3);
TestUnalign(static_cast<size_t>(1024 * 1024), /*shift_bit*/ 3);
}
// https://devblogs.nvidia.com/faster-parallel-reductions-kepler/
template <typename T>
static __forceinline__ __device__ T WarpReduceSum(T val) {
unsigned mask = 0u;
CREATE_SHFL_MASK(mask, true);
for (int offset = warpSize / 2; offset > 0; offset /= 2) {
val += paddle::platform::CudaShuffleDownSync(mask, val, offset);
}
return val;
}
template <typename T>
__forceinline__ __device__ T BlockReduce(T val) {
static __shared__ T shared[32]; // Shared mem for 32 partial sums
int lane = threadIdx.x % warpSize;
int wid = threadIdx.x / warpSize;
val = WarpReduceSum(val); // Each warp performs partial reduction
if (lane == 0) shared[wid] = val; // Write reduced value to shared memory
__syncthreads(); // Wait for all partial reductions
// read from shared memory only if that warp existed
val =
(threadIdx.x < blockDim.x / warpSize) ? shared[lane] : static_cast<T>(0);
if (wid == 0) val = WarpReduceSum(val); // Final reduce within first warp
return val;
}
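// Editor's note (illustrative numbers): with blockDim.x = 256 and warpSize = 32
// the first WarpReduceSum leaves 8 per-warp partial sums in shared[0..7]; only
// threads 0..7 of warp 0 then reload real values (the rest contribute 0), and a
// second WarpReduceSum within warp 0 leaves the block-wide sum in thread 0.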
template <typename T>
__global__ void DeviceReduceSum(T* in, T* out, size_t N) {
T sum(0);
CUDA_KERNEL_LOOP(i, N) { sum += in[i]; }
sum = BlockReduce<T>(sum);
__syncthreads();
if (threadIdx.x == 0) out[blockIdx.x] = sum;
}
template <typename T>
void TestReduce(size_t num, float atol = 0.01) {
T* in1;
T *d_in1, *d_in2;
size_t size = sizeof(T) * num;
#ifdef PADDLE_WITH_HIP
hipMalloc(reinterpret_cast<void**>(&d_in1), size);
hipMalloc(reinterpret_cast<void**>(&d_in2), sizeof(T));
#else
hipMalloc(reinterpret_cast<void**>(&d_in1), size);
hipMalloc(reinterpret_cast<void**>(&d_in2), sizeof(T));
#endif
in1 = reinterpret_cast<T*>(malloc(size));
std::minstd_rand engine;
std::uniform_real_distribution<double> dist(0.0, 1.0);
for (size_t i = 0; i < num; ++i) {
in1[i] = static_cast<T>(dist(engine));
}
auto out = std::accumulate(in1, in1 + num, static_cast<T>(0));
#ifdef PADDLE_WITH_HIP
hipMemcpy(d_in1, in1, size, hipMemcpyHostToDevice);
hipDeviceSynchronize();
hipLaunchKernelGGL(HIP_KERNEL_NAME(DeviceReduceSum<T>), dim3(1),
dim3(PADDLE_CUDA_NUM_THREADS), 0, 0, d_in1, d_in2, num);
hipMemcpy(in1, d_in2, sizeof(T), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
#else
hipMemcpy(d_in1, in1, size, hipMemcpyHostToDevice);
hipDeviceSynchronize();
hipLaunchKernelGGL(( DeviceReduceSum<T>), dim3(1), dim3(PADDLE_CUDA_NUM_THREADS), 0, 0, d_in1, d_in2, num);
hipMemcpy(in1, d_in2, sizeof(T), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
#endif
// NOTE(dzhwinter): the float16 add has small underflow/overflow
// so we use EXPECT_NEAR to check the result.
EXPECT_NEAR(static_cast<float>(in1[0]), static_cast<float>(out), atol);
free(in1);
#ifdef PADDLE_WITH_HIP
hipFree(d_in1);
hipFree(d_in2);
#else
hipFree(d_in1);
hipFree(d_in2);
#endif
}
TEST(CudaShuffleSync, float16) {
TestReduce<float>(10);
TestReduce<float>(1000);
  // float16 will overflow or accumulate truncation errors at larger sizes.
TestReduce<float16>(10);
TestReduce<float16>(100, /*atol error*/ 1.0);
}
| b63e203e510f6ecf19a93edd4f11813301ad6e46.cu | // Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <gtest/gtest.h>
#include <algorithm>
#include <iostream>
#ifdef _WIN32
#include <numeric>
#endif
#include <random>
#define PADDLE_CUDA_FP16
#include "paddle/fluid/platform/cuda_device_function.h"
#include "paddle/fluid/platform/cuda_primitives.h"
#include "paddle/fluid/platform/float16.h"
#include "paddle/fluid/platform/cuda_helper.h"
using paddle::platform::PADDLE_CUDA_NUM_THREADS;
using paddle::platform::float16;
template <typename T>
__global__ void AddKernel(const T* data_a, T* data_b, size_t num) {
CUDA_KERNEL_LOOP(i, num) {
paddle::platform::CudaAtomicAdd(&data_b[i], data_a[i]);
}
}
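// CUDA_KERNEL_LOOP is Paddle's grid-stride loop macro, so even the
// single-block launches used in the tests below cover all `num` elements:
// each thread simply strides by blockDim.x * gridDim.x until it passes the end.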
template <typename T>
struct AddFunctor {
T operator()(const T& a, const T& b) { return a + b; }
};
template <typename T>
void TestCase(size_t num) {
T *in1, *in2, *out;
T *d_in1, *d_in2;
size_t size = sizeof(T) * num;
#ifdef PADDLE_WITH_HIP
hipMalloc(reinterpret_cast<void**>(&d_in1), size);
hipMalloc(reinterpret_cast<void**>(&d_in2), size);
#else
cudaMalloc(reinterpret_cast<void**>(&d_in1), size);
cudaMalloc(reinterpret_cast<void**>(&d_in2), size);
#endif
in1 = reinterpret_cast<T*>(malloc(size));
in2 = reinterpret_cast<T*>(malloc(size));
out = reinterpret_cast<T*>(malloc(size));
std::minstd_rand engine;
std::uniform_real_distribution<double> dist(0.0, 1.0);
for (size_t i = 0; i < num; ++i) {
in1[i] = static_cast<T>(dist(engine));
in2[i] = static_cast<T>(dist(engine));
}
#ifdef PADDLE_WITH_HIP
hipMemcpy(d_in1, in1, size, hipMemcpyHostToDevice);
hipMemcpy(d_in2, in2, size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(HIP_KERNEL_NAME(AddKernel<T>), dim3(1),
dim3(PADDLE_CUDA_NUM_THREADS), 0, 0, d_in1, d_in2, num);
hipDeviceSynchronize();
hipMemcpy(out, d_in2, size, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
#else
cudaMemcpy(d_in1, in1, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_in2, in2, size, cudaMemcpyHostToDevice);
AddKernel<T><<<1, PADDLE_CUDA_NUM_THREADS>>>(d_in1, d_in2, num);
cudaDeviceSynchronize();
cudaMemcpy(out, d_in2, size, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
#endif
for (size_t i = 0; i < num; ++i) {
// NOTE(dzhwinter): the float16 add has small underflow/overflow
// so we use EXPECT_NEAR to check the result.
EXPECT_NEAR(static_cast<float>(out[i]),
static_cast<float>(AddFunctor<T>()(in1[i], in2[i])), 0.001);
}
free(in1);
free(in2);
free(out);
#ifdef PADDLE_WITH_HIP
hipFree(d_in1);
hipFree(d_in2);
#else
cudaFree(d_in1);
cudaFree(d_in2);
#endif
}
// cuda primitives
TEST(CudaAtomic, Add) {
TestCase<float>(static_cast<size_t>(10));
TestCase<float>(static_cast<size_t>(1024 * 1024));
TestCase<double>(static_cast<size_t>(10));
TestCase<double>(static_cast<size_t>(1024 * 1024));
}
TEST(CudaAtomic, float16) {
TestCase<float16>(static_cast<size_t>(1));
TestCase<float16>(static_cast<size_t>(2));
TestCase<float16>(static_cast<size_t>(3));
TestCase<float16>(static_cast<size_t>(10));
TestCase<float16>(static_cast<size_t>(1024 * 1024));
}
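// The unaligned cases below matter because a float16 atomic add is commonly
// emulated with a 32-bit compare-and-swap on the enclosing aligned word; a
// half value starting at an odd byte offset exercises that read-modify-write
// path rather than a natively aligned atomic.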
// unalignment of uint8
void TestUnalign(size_t num, const int shift_bit) {
ASSERT_EQ(num % 2, 0);
float16 *in1, *in2, *out;
float16 *d_in1, *d_in2;
size_t size = sizeof(uint8_t) * (num + shift_bit);
size_t array_size = sizeof(float16) * (num / 2);
#ifdef PADDLE_WITH_HIP
hipMalloc(reinterpret_cast<void**>(&d_in1), size);
hipMalloc(reinterpret_cast<void**>(&d_in2), size);
#else
cudaMalloc(reinterpret_cast<void**>(&d_in1), size);
cudaMalloc(reinterpret_cast<void**>(&d_in2), size);
#endif
in1 = reinterpret_cast<float16*>(malloc(size));
in2 = reinterpret_cast<float16*>(malloc(size));
out = reinterpret_cast<float16*>(malloc(size));
// right shift 1, mimic the unalignment of address
float16* r_in1 =
reinterpret_cast<float16*>(reinterpret_cast<uint8_t*>(in1) + shift_bit);
float16* r_in2 =
reinterpret_cast<float16*>(reinterpret_cast<uint8_t*>(in2) + shift_bit);
std::minstd_rand engine;
std::uniform_real_distribution<double> dist(0.0, 1.0);
for (size_t i = 0; i < num / 2; ++i) {
r_in1[i] = static_cast<float16>(dist(engine));
r_in2[i] = static_cast<float16>(dist(engine));
}
#ifdef PADDLE_WITH_HIP
hipMemcpy(d_in1, r_in1, array_size, hipMemcpyHostToDevice);
hipMemcpy(d_in2, r_in2, array_size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(HIP_KERNEL_NAME(AddKernel<float16>), dim3(1),
dim3(PADDLE_CUDA_NUM_THREADS), 0, 0, d_in1, d_in2,
num / 2);
hipDeviceSynchronize();
hipMemcpy(out, d_in2, array_size, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
#else
cudaMemcpy(d_in1, r_in1, array_size, cudaMemcpyHostToDevice);
cudaMemcpy(d_in2, r_in2, array_size, cudaMemcpyHostToDevice);
AddKernel<float16><<<1, PADDLE_CUDA_NUM_THREADS>>>(d_in1, d_in2, num / 2);
cudaDeviceSynchronize();
cudaMemcpy(out, d_in2, array_size, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
#endif
for (size_t i = 0; i < num / 2; ++i) {
// NOTE(dzhwinter): the float16 add has small truncate error.
// so we use EXPECT_NEAR to check the result.
EXPECT_NEAR(static_cast<float>(out[i]),
static_cast<float>(AddFunctor<float16>()(r_in1[i], r_in2[i])),
0.001);
}
free(in1);
free(in2);
free(out);
#ifdef PADDLE_WITH_HIP
hipFree(d_in1);
hipFree(d_in2);
#else
cudaFree(d_in1);
cudaFree(d_in2);
#endif
}
TEST(CudaAtomic, float16Unalign) {
  // same sizes as the float16 test case above
TestUnalign(static_cast<size_t>(2), /*shift_bit*/ 2);
TestUnalign(static_cast<size_t>(1024), /*shift_bit*/ 2);
TestUnalign(static_cast<size_t>(1024 * 1024), /*shift_bit*/ 2);
// shift the address.
TestUnalign(static_cast<size_t>(2), /*shift_bit*/ 1);
TestUnalign(static_cast<size_t>(1024), /*shift_bit*/ 1);
TestUnalign(static_cast<size_t>(1024 * 1024), /*shift_bit*/ 1);
TestUnalign(static_cast<size_t>(2), /*shift_bit*/ 3);
TestUnalign(static_cast<size_t>(1024), /*shift_bit*/ 3);
TestUnalign(static_cast<size_t>(1024 * 1024), /*shift_bit*/ 3);
}
// https://devblogs.nvidia.com/faster-parallel-reductions-kepler/
template <typename T>
static __forceinline__ __device__ T WarpReduceSum(T val) {
unsigned mask = 0u;
CREATE_SHFL_MASK(mask, true);
for (int offset = warpSize / 2; offset > 0; offset /= 2) {
val += paddle::platform::CudaShuffleDownSync(mask, val, offset);
}
return val;
}
template <typename T>
__forceinline__ __device__ T BlockReduce(T val) {
static __shared__ T shared[32]; // Shared mem for 32 partial sums
int lane = threadIdx.x % warpSize;
int wid = threadIdx.x / warpSize;
val = WarpReduceSum(val); // Each warp performs partial reduction
if (lane == 0) shared[wid] = val; // Write reduced value to shared memory
__syncthreads(); // Wait for all partial reductions
// read from shared memory only if that warp existed
val =
(threadIdx.x < blockDim.x / warpSize) ? shared[lane] : static_cast<T>(0);
if (wid == 0) val = WarpReduceSum(val); // Final reduce within first warp
return val;
}
template <typename T>
__global__ void DeviceReduceSum(T* in, T* out, size_t N) {
T sum(0);
CUDA_KERNEL_LOOP(i, N) { sum += in[i]; }
sum = BlockReduce<T>(sum);
__syncthreads();
if (threadIdx.x == 0) out[blockIdx.x] = sum;
}
template <typename T>
void TestReduce(size_t num, float atol = 0.01) {
T* in1;
T *d_in1, *d_in2;
size_t size = sizeof(T) * num;
#ifdef PADDLE_WITH_HIP
hipMalloc(reinterpret_cast<void**>(&d_in1), size);
hipMalloc(reinterpret_cast<void**>(&d_in2), sizeof(T));
#else
cudaMalloc(reinterpret_cast<void**>(&d_in1), size);
cudaMalloc(reinterpret_cast<void**>(&d_in2), sizeof(T));
#endif
in1 = reinterpret_cast<T*>(malloc(size));
std::minstd_rand engine;
std::uniform_real_distribution<double> dist(0.0, 1.0);
for (size_t i = 0; i < num; ++i) {
in1[i] = static_cast<T>(dist(engine));
}
auto out = std::accumulate(in1, in1 + num, static_cast<T>(0));
#ifdef PADDLE_WITH_HIP
hipMemcpy(d_in1, in1, size, hipMemcpyHostToDevice);
hipDeviceSynchronize();
hipLaunchKernelGGL(HIP_KERNEL_NAME(DeviceReduceSum<T>), dim3(1),
dim3(PADDLE_CUDA_NUM_THREADS), 0, 0, d_in1, d_in2, num);
hipMemcpy(in1, d_in2, sizeof(T), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
#else
cudaMemcpy(d_in1, in1, size, cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
DeviceReduceSum<T><<<1, PADDLE_CUDA_NUM_THREADS>>>(d_in1, d_in2, num);
cudaMemcpy(in1, d_in2, sizeof(T), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
#endif
// NOTE(dzhwinter): the float16 add has small underflow/overflow
// so we use EXPECT_NEAR to check the result.
EXPECT_NEAR(static_cast<float>(in1[0]), static_cast<float>(out), atol);
free(in1);
#ifdef PADDLE_WITH_HIP
hipFree(d_in1);
hipFree(d_in2);
#else
cudaFree(d_in1);
cudaFree(d_in2);
#endif
}
TEST(CudaShuffleSync, float16) {
TestReduce<float>(10);
TestReduce<float>(1000);
  // float16 will overflow or accumulate truncation errors at larger sizes.
TestReduce<float16>(10);
TestReduce<float16>(100, /*atol error*/ 1.0);
}
|
7bf2e226b0a511d8fdba79b3c615350dab1a808f.hip | // !!! This is a file automatically generated by hipify!!!
/*-------------------------------------------------------------------------
*
 * CUDA function for backprojection using FDK weights for CBCT
*
*
* CODE by Ander Biguri & Sepideh Hatamikia
* ---------------------------------------------------------------------------
* ---------------------------------------------------------------------------
* Copyright (c) 2015, University of Bath and CERN- European Organization for
* Nuclear Research
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
* ---------------------------------------------------------------------------
*
* Contact: [email protected]
* Codes : https://github.com/CERN/TIGRE
* ---------------------------------------------------------------------------
*/
#define PI_2 1.57079632679489661923
#include <algorithm>
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
#include "voxel_backprojection.hpp"
#include "voxel_backprojection_spherical.hpp"
#include "mex.h"
#include <math.h>
// https://stackoverflow.com/questions/16282136/is-there-a-cuda-equivalent-of-perror
#define cudaCheckErrors(msg) \
do { \
hipError_t __err = hipGetLastError(); \
if (__err != hipSuccess) { \
mexPrintf("%s \n",msg);\
mexErrMsgIdAndTxt("CBCT:CUDA:Atb",hipGetErrorString(__err));\
} \
} while (0)
#define MAXTREADS 1024
/*GEOMETRY DEFINITION
*
* Detector plane, behind
* |-----------------------------|
* | |
* | |
* | |
* | |
* | +--------+ |
| / /| |
A Z | / / |*D |
| | +--------+ | |
| | | | | |
| | | *O | + |
*--->y | | | / |
/ | | |/ |
V X | +--------+ |
* |-----------------------------|
*
* *S
*
*
*
*
*
**/
texture<float, hipTextureType3D , hipReadModeElementType> tex;
__global__ void FDKweigths(const Geometry geo,float* image,float constant){
size_t idx = threadIdx.x + blockIdx.x * blockDim.x;
for(; idx<geo.nVoxelX* geo.nVoxelY *geo.nVoxelZ; idx+=gridDim.x*blockDim.x) {
image[idx]*=constant;
}
}
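// FDKweigths above is a simple grid-stride scaling of every voxel by a
// constant factor; it is declared in this file but not launched by the
// backprojection routine below.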
__global__ void kernelPixelBackprojectionFDK_spherical(const Geometry geo,
float* image,
const int indAlpha,
const float COR,
const Point3D deltaX,
const Point3D deltaY,
const Point3D deltaZ,
const Point3D xyzOrigin,
const Point3D xyzOffset,
const Point3D uv0Offset,
const Point3D source){
unsigned long indY = blockIdx.y * blockDim.y + threadIdx.y;
unsigned long indX = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long indZ = blockIdx.z * blockDim.z + threadIdx.z;
//Make sure we dont go out of bounds
unsigned long long idx =indZ*geo.nVoxelX*geo.nVoxelY+indY*geo.nVoxelX + indX;
if (indX>=geo.nVoxelX | indY>=geo.nVoxelY |indZ>=geo.nVoxelZ)
return;
    // Geometric transformations:
    //Source, scaled XYZ coordinates
    // "XYZ" in the scaled coordinate system of the current point. The image is rotated with the projection angles.
Point3D P;
P.x=(xyzOrigin.x+indX*deltaX.x+indY*deltaY.x+indZ*deltaZ.x);
P.y=(xyzOrigin.y+indX*deltaX.y+indY*deltaY.y+indZ*deltaZ.y)-COR/geo.dDetecU;
P.z=(xyzOrigin.z+indX*deltaX.z+indY*deltaY.z+indZ*deltaZ.z);
// This is the vector defining the line from the source to the Voxel
float vectX,vectY,vectZ;
vectX=(P.x -source.x);
vectY=(P.y -source.y);
vectZ=(P.z -source.z);
// Get the coordinates in the detector UV where the mid point of the voxel is projected.
float t=(geo.DSO-geo.DSD /*-DDO*/ - source.x)/vectX;
float y,z;
y=vectY*t+source.y;
z=vectZ*t+source.z;
float u,v;
u=y+geo.nDetecU/2;
v=z+geo.nDetecV/2;
float weigth;
float realx,realy;
realx=-geo.sVoxelX/2+geo.dVoxelX/2 +indX*geo.dVoxelX +xyzOffset.x;
realy=-geo.sVoxelY/2+geo.dVoxelY/2 +indY*geo.dVoxelY +xyzOffset.y+COR;
    weigth=(geo.DSO+realy*sin(geo.alpha)-realx*cos(geo.alpha))/geo.DSO; //TODO: This is wrong for spherical
    weigth=1/(weigth*weigth);
    // Get the value at the computed (U,V) and multiply by the corresponding weight.
image[idx]+=tex3D(tex, v ,
u ,
indAlpha+0.5)
*weigth;
// image[idx]=v;
}
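// Note on the weighting above: this is the circular-trajectory FDK distance
// weight U = (DSO + realy*sin(alpha) - realx*cos(alpha)) / DSO, applied as
// 1/U^2 per backprojected sample. Only the in-plane angle alpha enters the
// expression, which is what the TODO refers to: for general three-angle
// (spherical) trajectories this weight is an approximation.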
int voxel_backprojection_spherical(float const * const projections, Geometry geo, float* result,float const * const angles,int nalpha){
/*
* Allocate texture memory on the device
*/
// copy data to CUDA memory
hipArray *d_projectiondata = 0;
const hipExtent extent = make_hipExtent(geo.nDetecU,geo.nDetecV,nalpha);
hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>();
hipMalloc3DArray(&d_projectiondata, &channelDesc, extent);
cudaCheckErrors("hipMalloc3D error 3D tex");
hipMemcpy3DParms copyParams = { 0 };
copyParams.srcPtr = make_hipPitchedPtr((void*)projections, extent.width*sizeof(float), extent.width, extent.height);
copyParams.dstArray = d_projectiondata;
copyParams.extent = extent;
copyParams.kind = hipMemcpyHostToDevice;
    hipMemcpy3D(&copyParams);
cudaCheckErrors("hipMemcpy3D fail");
// Configure texture options
tex.normalized = false;
tex.filterMode = hipFilterModeLinear;
tex.addressMode[0] = hipAddressModeBorder;
tex.addressMode[1] = hipAddressModeBorder;
tex.addressMode[2] = hipAddressModeBorder;
hipBindTextureToArray(tex, d_projectiondata, channelDesc);
cudaCheckErrors("3D texture memory bind fail");
// Allocate result image memory
size_t num_bytes = geo.nVoxelX*geo.nVoxelY*geo.nVoxelZ * sizeof(float);
float* dimage;
hipMalloc((void**)&dimage, num_bytes);
hipMemset(dimage,0,num_bytes);
cudaCheckErrors("hipMalloc fail");
// If we are going to time
bool timekernel=false;
hipEvent_t start, stop;
float elapsedTime;
if (timekernel){
hipEventCreate(&start);
hipEventRecord(start,0);
}
int divx,divy,divz;
    //empirical
divx=32;
divy=32;
divz=1;
dim3 grid((geo.nVoxelX+divx-1)/divx,
(geo.nVoxelY+divy-1)/divy,
(geo.nVoxelZ+divz-1)/divz);
dim3 block(divx,divy,divz);
Point3D deltaX,deltaY,deltaZ,xyzOrigin, offOrig, offDetec,source;
for (unsigned int i=0;i<nalpha;i++){
geo.alpha=-angles[i*3];
geo.theta=-angles[i*3+1];
geo.psi =-angles[i*3+2];
computeDeltasCubeSpherical(geo,i,&xyzOrigin,&deltaX,&deltaY,&deltaZ,&source);
offOrig.x=geo.offOrigX[i];
offOrig.y=geo.offOrigY[i];
offDetec.x=geo.offDetecU[i];
offDetec.y=geo.offDetecV[i];
hipLaunchKernelGGL(( kernelPixelBackprojectionFDK_spherical), dim3(grid),dim3(block), 0, 0, geo,dimage,i,geo.COR[i],deltaX,deltaY,deltaZ,xyzOrigin,offOrig,offDetec,source);
cudaCheckErrors("Kernel fail");
}
if (timekernel){
hipEventCreate(&stop);
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start,stop);
mexPrintf("%f\n" ,elapsedTime);
cudaCheckErrors("cuda Timing fail");
}
hipMemcpy(result, dimage, num_bytes, hipMemcpyDeviceToHost);
cudaCheckErrors("hipMemcpy result fail");
hipUnbindTexture(tex);
cudaCheckErrors("Unbind fail");
hipFree(dimage);
hipFreeArray(d_projectiondata);
cudaCheckErrors("hipFree d_imagedata fail");
//hipDeviceReset();
return 0;
}
void computeDeltasCubeSpherical(Geometry geo, int i, Point3D* xyzorigin, Point3D* deltaX, Point3D* deltaY, Point3D* deltaZ,Point3D *S){
Point3D P, Px,Py,Pz;
// Get coords of Img(0,0,0)
P.x=-(geo.sVoxelX/2-geo.dVoxelX/2)+geo.offOrigX[i];
P.y=-(geo.sVoxelY/2-geo.dVoxelY/2)+geo.offOrigY[i];
P.z=-(geo.sVoxelZ/2-geo.dVoxelZ/2)+geo.offOrigZ[i];
    // Get coords of the next voxel in each direction
Px.x=P.x+geo.dVoxelX; Py.x=P.x; Pz.x=P.x;
Px.y=P.y; Py.y=P.y+geo.dVoxelY; Pz.y=P.y;
Px.z=P.z; Py.z=P.z; Pz.z=P.z+geo.dVoxelZ;
    // Rotate the image (this is equivalent to rotating the source and detector): RZ RY RZ
eulerZYZT(geo,&P);
eulerZYZT(geo,&Px);
eulerZYZT(geo,&Py);
eulerZYZT(geo,&Pz);
//detector offset
P.z =P.z-geo.offDetecV[i]; P.y =P.y-geo.offDetecU[i];
Px.z =Px.z-geo.offDetecV[i]; Px.y =Px.y-geo.offDetecU[i];
Py.z =Py.z-geo.offDetecV[i]; Py.y =Py.y-geo.offDetecU[i];
Pz.z =Pz.z-geo.offDetecV[i]; Pz.y =Pz.y-geo.offDetecU[i];
//Detector Roll pitch Yaw
//
//
// first, we need to offset everything so (0,0,0) is the center of the detector
// Only X is required for that
P.x=P.x+(geo.DSD-geo.DSO);
Px.x=Px.x+(geo.DSD-geo.DSO);
Py.x=Py.x+(geo.DSD-geo.DSO);
Pz.x=Pz.x+(geo.DSD-geo.DSO);
rollPitchYawT(geo,i,&P);
rollPitchYawT(geo,i,&Px);
rollPitchYawT(geo,i,&Py);
rollPitchYawT(geo,i,&Pz);
P.x=P.x-(geo.DSD-geo.DSO);
Px.x=Px.x-(geo.DSD-geo.DSO);
Py.x=Py.x-(geo.DSD-geo.DSO);
Pz.x=Pz.x-(geo.DSD-geo.DSO);
//Done for P, now source
Point3D source;
    source.x=geo.DSD; //already offset for rotation
source.y=-geo.offDetecU[i];
source.z=-geo.offDetecV[i];
rollPitchYawT(geo,i,&source);
source.x=source.x-(geo.DSD-geo.DSO);// source.y=source.y-auxOff.y; source.z=source.z-auxOff.z;
// mexPrintf("%f,%f,%f\n",source.x,source.y,source.z);
// Scale coords so detector pixels are 1x1
P.z =P.z /geo.dDetecV; P.y =P.y/geo.dDetecU;
Px.z=Px.z/geo.dDetecV; Px.y=Px.y/geo.dDetecU;
Py.z=Py.z/geo.dDetecV; Py.y=Py.y/geo.dDetecU;
Pz.z=Pz.z/geo.dDetecV; Pz.y=Pz.y/geo.dDetecU;
source.z=source.z/geo.dDetecV; source.y=source.y/geo.dDetecU;
// get deltas of the changes in voxels
deltaX->x=Px.x-P.x; deltaX->y=Px.y-P.y; deltaX->z=Px.z-P.z;
deltaY->x=Py.x-P.x; deltaY->y=Py.y-P.y; deltaY->z=Py.z-P.z;
deltaZ->x=Pz.x-P.x; deltaZ->y=Pz.y-P.y; deltaZ->z=Pz.z-P.z;
*xyzorigin=P;
*S=source;
}
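// eulerZYZT below multiplies a point by the ZYZ Euler rotation built from
// (psi, theta, alpha); the caller negates the projection angles first, so the
// net effect is to rotate the volume into the frame of the current projection
// instead of rotating the source and detector.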
void eulerZYZT(Geometry geo, Point3D* point){
Point3D auxPoint;
auxPoint.x=point->x;
auxPoint.y=point->y;
auxPoint.z=point->z;
point->x = auxPoint.x*(cos(geo.psi)*cos(geo.theta)*cos(geo.alpha)-sin(geo.psi)*sin(geo.alpha))
+auxPoint.y*(-cos(geo.psi)*cos(geo.theta)*sin(geo.alpha)-sin(geo.psi)*cos(geo.alpha))
+auxPoint.z*cos(geo.psi)*sin(geo.theta);
point->y = auxPoint.x*(sin(geo.psi)*cos(geo.theta)*cos(geo.alpha)+cos(geo.psi)*sin(geo.alpha))
+auxPoint.y*(-sin(geo.psi)*cos(geo.theta)*sin(geo.alpha)+cos(geo.psi)*cos(geo.alpha))
+auxPoint.z*sin(geo.psi)*sin(geo.theta);
point->z =-auxPoint.x*sin(geo.theta)*cos(geo.alpha)
+auxPoint.y*sin(geo.theta)*sin(geo.alpha)
+auxPoint.z*cos(geo.theta);
} | 7bf2e226b0a511d8fdba79b3c615350dab1a808f.cu | /*-------------------------------------------------------------------------
*
 * CUDA function for backprojection using FDK weights for CBCT
*
*
* CODE by Ander Biguri & Sepideh Hatamikia
* ---------------------------------------------------------------------------
* ---------------------------------------------------------------------------
* Copyright (c) 2015, University of Bath and CERN- European Organization for
* Nuclear Research
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
* ---------------------------------------------------------------------------
*
* Contact: [email protected]
* Codes : https://github.com/CERN/TIGRE
* ---------------------------------------------------------------------------
*/
#define PI_2 1.57079632679489661923
#include <algorithm>
#include <cuda_runtime_api.h>
#include <cuda.h>
#include "voxel_backprojection.hpp"
#include "voxel_backprojection_spherical.hpp"
#include "mex.h"
#include <math.h>
// https://stackoverflow.com/questions/16282136/is-there-a-cuda-equivalent-of-perror
#define cudaCheckErrors(msg) \
do { \
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
mexPrintf("%s \n",msg);\
mexErrMsgIdAndTxt("CBCT:CUDA:Atb",cudaGetErrorString(__err));\
} \
} while (0)
#define MAXTREADS 1024
/*GEOMETRY DEFINITION
*
* Detector plane, behind
* |-----------------------------|
* | |
* | |
* | |
* | |
* | +--------+ |
| / /| |
A Z | / / |*D |
| | +--------+ | |
| | | | | |
| | | *O | + |
*--->y | | | / |
/ | | |/ |
V X | +--------+ |
* |-----------------------------|
*
* *S
*
*
*
*
*
**/
texture<float, cudaTextureType3D , cudaReadModeElementType> tex;
__global__ void FDKweigths(const Geometry geo,float* image,float constant){
size_t idx = threadIdx.x + blockIdx.x * blockDim.x;
for(; idx<geo.nVoxelX* geo.nVoxelY *geo.nVoxelZ; idx+=gridDim.x*blockDim.x) {
image[idx]*=constant;
}
}
__global__ void kernelPixelBackprojectionFDK_spherical(const Geometry geo,
float* image,
const int indAlpha,
const float COR,
const Point3D deltaX,
const Point3D deltaY,
const Point3D deltaZ,
const Point3D xyzOrigin,
const Point3D xyzOffset,
const Point3D uv0Offset,
const Point3D source){
unsigned long indY = blockIdx.y * blockDim.y + threadIdx.y;
unsigned long indX = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long indZ = blockIdx.z * blockDim.z + threadIdx.z;
//Make sure we dont go out of bounds
unsigned long long idx =indZ*geo.nVoxelX*geo.nVoxelY+indY*geo.nVoxelX + indX;
if (indX>=geo.nVoxelX | indY>=geo.nVoxelY |indZ>=geo.nVoxelZ)
return;
    // Geometric transformations:
    //Source, scaled XYZ coordinates
    // "XYZ" in the scaled coordinate system of the current point. The image is rotated with the projection angles.
Point3D P;
P.x=(xyzOrigin.x+indX*deltaX.x+indY*deltaY.x+indZ*deltaZ.x);
P.y=(xyzOrigin.y+indX*deltaX.y+indY*deltaY.y+indZ*deltaZ.y)-COR/geo.dDetecU;
P.z=(xyzOrigin.z+indX*deltaX.z+indY*deltaY.z+indZ*deltaZ.z);
// This is the vector defining the line from the source to the Voxel
float vectX,vectY,vectZ;
vectX=(P.x -source.x);
vectY=(P.y -source.y);
vectZ=(P.z -source.z);
// Get the coordinates in the detector UV where the mid point of the voxel is projected.
float t=(geo.DSO-geo.DSD /*-DDO*/ - source.x)/vectX;
float y,z;
y=vectY*t+source.y;
z=vectZ*t+source.z;
float u,v;
u=y+geo.nDetecU/2;
v=z+geo.nDetecV/2;
float weigth;
float realx,realy;
realx=-geo.sVoxelX/2+geo.dVoxelX/2 +indX*geo.dVoxelX +xyzOffset.x;
realy=-geo.sVoxelY/2+geo.dVoxelY/2 +indY*geo.dVoxelY +xyzOffset.y+COR;
    weigth=(geo.DSO+realy*sin(geo.alpha)-realx*cos(geo.alpha))/geo.DSO; //TODO: This is wrong for spherical
    weigth=1/(weigth*weigth);
    // Get the value at the computed (U,V) and multiply by the corresponding weight.
image[idx]+=tex3D(tex, v ,
u ,
indAlpha+0.5)
*weigth;
// image[idx]=v;
}
int voxel_backprojection_spherical(float const * const projections, Geometry geo, float* result,float const * const angles,int nalpha){
/*
* Allocate texture memory on the device
*/
// copy data to CUDA memory
cudaArray *d_projectiondata = 0;
const cudaExtent extent = make_cudaExtent(geo.nDetecU,geo.nDetecV,nalpha);
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>();
cudaMalloc3DArray(&d_projectiondata, &channelDesc, extent);
cudaCheckErrors("cudaMalloc3D error 3D tex");
cudaMemcpy3DParms copyParams = { 0 };
copyParams.srcPtr = make_cudaPitchedPtr((void*)projections, extent.width*sizeof(float), extent.width, extent.height);
copyParams.dstArray = d_projectiondata;
copyParams.extent = extent;
copyParams.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(©Params);
cudaCheckErrors("cudaMemcpy3D fail");
// Configure texture options
tex.normalized = false;
tex.filterMode = cudaFilterModeLinear;
tex.addressMode[0] = cudaAddressModeBorder;
tex.addressMode[1] = cudaAddressModeBorder;
tex.addressMode[2] = cudaAddressModeBorder;
cudaBindTextureToArray(tex, d_projectiondata, channelDesc);
cudaCheckErrors("3D texture memory bind fail");
// Allocate result image memory
size_t num_bytes = geo.nVoxelX*geo.nVoxelY*geo.nVoxelZ * sizeof(float);
float* dimage;
cudaMalloc((void**)&dimage, num_bytes);
cudaMemset(dimage,0,num_bytes);
cudaCheckErrors("cudaMalloc fail");
// If we are going to time
bool timekernel=false;
cudaEvent_t start, stop;
float elapsedTime;
if (timekernel){
cudaEventCreate(&start);
cudaEventRecord(start,0);
}
int divx,divy,divz;
    //empirical
divx=32;
divy=32;
divz=1;
dim3 grid((geo.nVoxelX+divx-1)/divx,
(geo.nVoxelY+divy-1)/divy,
(geo.nVoxelZ+divz-1)/divz);
dim3 block(divx,divy,divz);
Point3D deltaX,deltaY,deltaZ,xyzOrigin, offOrig, offDetec,source;
for (unsigned int i=0;i<nalpha;i++){
geo.alpha=-angles[i*3];
geo.theta=-angles[i*3+1];
geo.psi =-angles[i*3+2];
computeDeltasCubeSpherical(geo,i,&xyzOrigin,&deltaX,&deltaY,&deltaZ,&source);
offOrig.x=geo.offOrigX[i];
offOrig.y=geo.offOrigY[i];
offDetec.x=geo.offDetecU[i];
offDetec.y=geo.offDetecV[i];
kernelPixelBackprojectionFDK_spherical<<<grid,block>>>(geo,dimage,i,geo.COR[i],deltaX,deltaY,deltaZ,xyzOrigin,offOrig,offDetec,source);
cudaCheckErrors("Kernel fail");
}
if (timekernel){
cudaEventCreate(&stop);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start,stop);
mexPrintf("%f\n" ,elapsedTime);
cudaCheckErrors("cuda Timing fail");
}
cudaMemcpy(result, dimage, num_bytes, cudaMemcpyDeviceToHost);
cudaCheckErrors("cudaMemcpy result fail");
cudaUnbindTexture(tex);
cudaCheckErrors("Unbind fail");
cudaFree(dimage);
cudaFreeArray(d_projectiondata);
cudaCheckErrors("cudaFree d_imagedata fail");
//cudaDeviceReset();
return 0;
}
void computeDeltasCubeSpherical(Geometry geo, int i, Point3D* xyzorigin, Point3D* deltaX, Point3D* deltaY, Point3D* deltaZ,Point3D *S){
Point3D P, Px,Py,Pz;
// Get coords of Img(0,0,0)
P.x=-(geo.sVoxelX/2-geo.dVoxelX/2)+geo.offOrigX[i];
P.y=-(geo.sVoxelY/2-geo.dVoxelY/2)+geo.offOrigY[i];
P.z=-(geo.sVoxelZ/2-geo.dVoxelZ/2)+geo.offOrigZ[i];
    // Get coords of the next voxel in each direction
Px.x=P.x+geo.dVoxelX; Py.x=P.x; Pz.x=P.x;
Px.y=P.y; Py.y=P.y+geo.dVoxelY; Pz.y=P.y;
Px.z=P.z; Py.z=P.z; Pz.z=P.z+geo.dVoxelZ;
    // Rotate the image (this is equivalent to rotating the source and detector): RZ RY RZ
eulerZYZT(geo,&P);
eulerZYZT(geo,&Px);
eulerZYZT(geo,&Py);
eulerZYZT(geo,&Pz);
//detector offset
P.z =P.z-geo.offDetecV[i]; P.y =P.y-geo.offDetecU[i];
Px.z =Px.z-geo.offDetecV[i]; Px.y =Px.y-geo.offDetecU[i];
Py.z =Py.z-geo.offDetecV[i]; Py.y =Py.y-geo.offDetecU[i];
Pz.z =Pz.z-geo.offDetecV[i]; Pz.y =Pz.y-geo.offDetecU[i];
//Detector Roll pitch Yaw
//
//
// first, we need to offset everything so (0,0,0) is the center of the detector
// Only X is required for that
P.x=P.x+(geo.DSD-geo.DSO);
Px.x=Px.x+(geo.DSD-geo.DSO);
Py.x=Py.x+(geo.DSD-geo.DSO);
Pz.x=Pz.x+(geo.DSD-geo.DSO);
rollPitchYawT(geo,i,&P);
rollPitchYawT(geo,i,&Px);
rollPitchYawT(geo,i,&Py);
rollPitchYawT(geo,i,&Pz);
P.x=P.x-(geo.DSD-geo.DSO);
Px.x=Px.x-(geo.DSD-geo.DSO);
Py.x=Py.x-(geo.DSD-geo.DSO);
Pz.x=Pz.x-(geo.DSD-geo.DSO);
//Done for P, now source
Point3D source;
    source.x=geo.DSD; //already offset for rotation
source.y=-geo.offDetecU[i];
source.z=-geo.offDetecV[i];
rollPitchYawT(geo,i,&source);
source.x=source.x-(geo.DSD-geo.DSO);// source.y=source.y-auxOff.y; source.z=source.z-auxOff.z;
// mexPrintf("%f,%f,%f\n",source.x,source.y,source.z);
// Scale coords so detector pixels are 1x1
P.z =P.z /geo.dDetecV; P.y =P.y/geo.dDetecU;
Px.z=Px.z/geo.dDetecV; Px.y=Px.y/geo.dDetecU;
Py.z=Py.z/geo.dDetecV; Py.y=Py.y/geo.dDetecU;
Pz.z=Pz.z/geo.dDetecV; Pz.y=Pz.y/geo.dDetecU;
source.z=source.z/geo.dDetecV; source.y=source.y/geo.dDetecU;
// get deltas of the changes in voxels
deltaX->x=Px.x-P.x; deltaX->y=Px.y-P.y; deltaX->z=Px.z-P.z;
deltaY->x=Py.x-P.x; deltaY->y=Py.y-P.y; deltaY->z=Py.z-P.z;
deltaZ->x=Pz.x-P.x; deltaZ->y=Pz.y-P.y; deltaZ->z=Pz.z-P.z;
*xyzorigin=P;
*S=source;
}
void eulerZYZT(Geometry geo, Point3D* point){
Point3D auxPoint;
auxPoint.x=point->x;
auxPoint.y=point->y;
auxPoint.z=point->z;
point->x = auxPoint.x*(cos(geo.psi)*cos(geo.theta)*cos(geo.alpha)-sin(geo.psi)*sin(geo.alpha))
+auxPoint.y*(-cos(geo.psi)*cos(geo.theta)*sin(geo.alpha)-sin(geo.psi)*cos(geo.alpha))
+auxPoint.z*cos(geo.psi)*sin(geo.theta);
point->y = auxPoint.x*(sin(geo.psi)*cos(geo.theta)*cos(geo.alpha)+cos(geo.psi)*sin(geo.alpha))
+auxPoint.y*(-sin(geo.psi)*cos(geo.theta)*sin(geo.alpha)+cos(geo.psi)*cos(geo.alpha))
+auxPoint.z*sin(geo.psi)*sin(geo.theta);
point->z =-auxPoint.x*sin(geo.theta)*cos(geo.alpha)
+auxPoint.y*sin(geo.theta)*sin(geo.alpha)
+auxPoint.z*cos(geo.theta);
} |
d113d4eff3e570c9aab910f726645c0f15933fcf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "gputimer.h"
const int X= 8; // Number of inputs
const int H= 4; //Hidden layer size
const int K= 32; // tile size is KxK
const int N= X*H; //Total elements
// Utility functions: compare, print, and fill matrices
#define checkCudaErrors(val) check( (val), #val, __FILE__, __LINE__)
template<typename T>
void check(T err, const char* const func, const char* const file, const int line)
{
if (err != hipSuccess) {
fprintf(stderr, "CUDA error at: %s : %d\n", file,line);
fprintf(stderr, "%s %s\n", hipGetErrorString(err), func);;
exit(1);
}
}
int compare_matrices(float *gpu, float *ref)
{
int result = 0;
for(int j=0; j < H; j++){
for(int i=0; i < X; i++)
if (ref[i + j*X] != gpu[i + j*X])
{
// printf("reference(%d,%d) = %f but test(%d,%d) = %f\n",
// i,j,ref[i+j*N],i,j,test[i+j*N]);
result = 1;
}
}
return result;
}
void print_matrix(float *mat)
{
for(int j=0; j < H; j++)
{
for(int i=0; i < X; i++) { printf("%4.4g ", mat[i + j*X]); }
printf("\n");
}
}
void print_input(float *mat)
{
for(int j=0; j < X; j++)
{
printf("%4.4g ", mat[j]);
}
}
// fill a matrix with sequential numbers in the range 0..N-1
void fill_matrix(float *mat, int rowSize,int columnSize)
{
for(int j=0; j < rowSize * columnSize; j++)
mat[j] = (float) j;
}
void
matmul_CPU(float input[],float weight[],float out[])
{
for(int j=0; j < H; j++){
for(int i=0; i < X; i++){
out[j*X+i] += weight[j*X+i]*input[i]; // out(j,i) = in(i,j)
}
}
}
//matmul parallel
__global__ void
matmul_parallel(float input[], float weight[], float out[])
{
//(i,j) location of element
int intWeightX = blockIdx.x*blockDim.x + threadIdx.x;
int intInputX = threadIdx.x;
__shared__ float title[N];
title[intWeightX]= input[intInputX]*weight[intWeightX];
__syncthreads();
// read from shared mem, coalesced write to global mem:
out[intWeightX] = title[intWeightX];
}
int main(int argc, char **argv)
{
int numbytes_input = X * sizeof(float);
int numbytes_weight = X * H * sizeof(float);
int numbytes_out = H * sizeof(float);
float *input = (float *) malloc(numbytes_input);
float *weight = (float *) malloc(numbytes_weight);
float *out = (float *) malloc(numbytes_weight);
float *gold = (float *) malloc(numbytes_weight);
fill_matrix(input, X, 1);
fill_matrix(weight, X, H);
matmul_CPU(input, weight, gold);
float *d_input, *d_weight, *d_out;
hipMalloc(&d_input, numbytes_input);
hipMalloc(&d_weight, numbytes_weight);
hipMalloc(&d_out, numbytes_out);
hipMemcpy(d_input, input, numbytes_input, hipMemcpyHostToDevice);
hipMemcpy(d_weight, weight, numbytes_weight, hipMemcpyHostToDevice);
GpuTimer timer;
/*
* Now time each kernel and verify that it produces the correct result.
*
* To be really careful about benchmarking purposes, we should run every kernel once
* to "warm" the system and avoid any compilation or code-caching effects, then run
* every kernel 10 or 100 times and average the timings to smooth out any variance.
* But this makes for messy code and our goal is teaching, not detailed benchmarking.
*/
timer.Start();
dim3 blocks(N/K,1); // blocks per grid
dim3 threads(K,1); // threads per block
printf("blocks %d\n",N/K);
print_matrix(weight);
hipLaunchKernelGGL(( matmul_parallel), dim3(blocks),dim3(threads), 0, 0, d_input,d_weight, d_out);
hipMemcpy(out, d_out, numbytes_out, hipMemcpyDeviceToHost);
printf("weight\n");
print_matrix(weight);
printf("input\n");
print_input(input);
printf("\n");
printf("gpu\n");
print_matrix(out);
printf("cpu\n");
print_matrix(gold);
printf("multiply_matrices: %g ms.\nVerifying transpose...%s\n",
timer.Elapsed(), compare_matrices(out, gold) ? "Failed" : "Success");
hipFree(d_input);
hipFree(d_weight);
hipFree(d_out);
}
| d113d4eff3e570c9aab910f726645c0f15933fcf.cu | #include <stdio.h>
#include "gputimer.h"
const int X= 8; // Number of inputs
const int H= 4; //Hidden layer size
const int K= 32; // tile size is KxK
const int N= X*H; //Total elements
// Utility functions: compare, print, and fill matrices
#define checkCudaErrors(val) check( (val), #val, __FILE__, __LINE__)
template<typename T>
void check(T err, const char* const func, const char* const file, const int line)
{
if (err != cudaSuccess) {
fprintf(stderr, "CUDA error at: %s : %d\n", file,line);
fprintf(stderr, "%s %s\n", cudaGetErrorString(err), func);;
exit(1);
}
}
int compare_matrices(float *gpu, float *ref)
{
int result = 0;
for(int j=0; j < H; j++){
for(int i=0; i < X; i++)
if (ref[i + j*X] != gpu[i + j*X])
{
// printf("reference(%d,%d) = %f but test(%d,%d) = %f\n",
// i,j,ref[i+j*N],i,j,test[i+j*N]);
result = 1;
}
}
return result;
}
void print_matrix(float *mat)
{
for(int j=0; j < H; j++)
{
for(int i=0; i < X; i++) { printf("%4.4g ", mat[i + j*X]); }
printf("\n");
}
}
void print_input(float *mat)
{
for(int j=0; j < X; j++)
{
printf("%4.4g ", mat[j]);
}
}
// fill a matrix with sequential numbers in the range 0..N-1
void fill_matrix(float *mat, int rowSize,int columnSize)
{
for(int j=0; j < rowSize * columnSize; j++)
mat[j] = (float) j;
}
void
matmul_CPU(float input[],float weight[],float out[])
{
for(int j=0; j < H; j++){
for(int i=0; i < X; i++){
out[j*X+i] += weight[j*X+i]*input[i]; // out(j,i) = in(i,j)
}
}
}
//matmul parallel
__global__ void
matmul_parallel(float input[], float weight[], float out[])
{
//(i,j) location of element
int intWeightX = blockIdx.x*blockDim.x + threadIdx.x;
int intInputX = threadIdx.x;
__shared__ float title[N];
title[intWeightX]= input[intInputX]*weight[intWeightX];
__syncthreads();
// read from shared mem, coalesced write to global mem:
out[intWeightX] = title[intWeightX];
}
int main(int argc, char **argv)
{
int numbytes_input = X * sizeof(float);
int numbytes_weight = X * H * sizeof(float);
int numbytes_out = H * sizeof(float);
float *input = (float *) malloc(numbytes_input);
float *weight = (float *) malloc(numbytes_weight);
float *out = (float *) malloc(numbytes_weight);
float *gold = (float *) malloc(numbytes_weight);
fill_matrix(input, X, 1);
fill_matrix(weight, X, H);
matmul_CPU(input, weight, gold);
float *d_input, *d_weight, *d_out;
cudaMalloc(&d_input, numbytes_input);
cudaMalloc(&d_weight, numbytes_weight);
cudaMalloc(&d_out, numbytes_out);
cudaMemcpy(d_input, input, numbytes_input, cudaMemcpyHostToDevice);
cudaMemcpy(d_weight, weight, numbytes_weight, cudaMemcpyHostToDevice);
GpuTimer timer;
/*
* Now time each kernel and verify that it produces the correct result.
*
* To be really careful about benchmarking purposes, we should run every kernel once
* to "warm" the system and avoid any compilation or code-caching effects, then run
* every kernel 10 or 100 times and average the timings to smooth out any variance.
* But this makes for messy code and our goal is teaching, not detailed benchmarking.
*/
timer.Start();
dim3 blocks(N/K,1); // blocks per grid
dim3 threads(K,1); // threads per block
printf("blocks %d\n",N/K);
print_matrix(weight);
matmul_parallel<<<blocks,threads>>>(d_input,d_weight, d_out);
cudaMemcpy(out, d_out, numbytes_out, cudaMemcpyDeviceToHost);
printf("weight\n");
print_matrix(weight);
printf("input\n");
print_input(input);
printf("\n");
printf("gpu\n");
print_matrix(out);
printf("cpu\n");
print_matrix(gold);
printf("multiply_matrices: %g ms.\nVerifying transpose...%s\n",
timer.Elapsed(), compare_matrices(out, gold) ? "Failed" : "Success");
cudaFree(d_input);
cudaFree(d_weight);
cudaFree(d_out);
}
|
d7f13b81a2146deed3b6cac0ac61aebaa39f25c4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// To compile: nvcc HW4.cu -o temp; ./temp
#include <sys/time.h>
#include <stdio.h>
//---global vars---access for GPU
const int N = 2000;
// Querying the device at file scope does not compile, and the __shared__ array
// in the kernel needs a compile-time size, so a fixed block size is used here
// instead of prop.maxThreadsPerBlock (which is 1024 on current GPUs).
const int threadsPerBlock = 1024;
const int blocksPerGrid = ((N-1)/threadsPerBlock)+1;
//error check func for methods
void CUDAErrorCheck(const char *message)
{
hipError_t error;
error = hipGetLastError();
if(error != hipSuccess)
{
printf("\n CUDA ERROR in: %s -> %s\n", message, hipGetErrorString(error));
exit(0);
}
}
__global__ void dotProd(float *A_GPU, float *B_GPU, float *C_GPU){
//---dotProd---will give the thread ability to share mem on block
__shared__ float sh_mem[threadsPerBlock];
int tid = threadIdx.x + blockIdx.x*blockDim.x;
//---dotProd---limits at the nr_threads/Block
int sh_mem_id = threadIdx.x;
//---dotProd---store the product of id temporarily
float temp = 0.0;
while(tid < N){
temp += A_GPU[tid] * B_GPU[tid];
tid += blockDim.x * gridDim.x; //takes us to the id in the next block
}
//---dotProd---set mem val for that id
sh_mem[sh_mem_id] = temp;
__syncthreads();
//---dotProd---for any number vector
//change
int i = blockDim.x/2;
while(i != 0){
//will only execute if threadId within vector length
if(sh_mem_id < i){
//halfing the vector and adding the matching locations
sh_mem[sh_mem_id] += sh_mem[sh_mem_id + i];
__syncthreads();
}
i /= 2;
}
//---dotProd---inserts block sum into C_GPU to add later
if(sh_mem_id == 0)
C_GPU[blockIdx.x] = sh_mem[0];
}
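// Note: the halving loop above assumes blockDim.x is a power of two; with a
// non-power-of-two block size the last unpaired partial sum would be dropped
// at some step. The fixed threadsPerBlock used below satisfies this assumption.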
int main()
{
long id;
float *A_CPU, *B_CPU, *C_CPU, sumErrytin; //Pointers for memory on the Host
// Your variables start here.
float *A_GPU, *B_GPU, *C_GPU;
// Your variables stop here.
A_CPU = (float*)malloc(N*sizeof(float));
B_CPU = (float*)malloc(N*sizeof(float));
C_CPU = (float*)malloc(N*sizeof(float));
for(id = 0; id < N; id++) {A_CPU[id] = 1; B_CPU[id] = 3;}
// Your code starts here.
//---main---mallocGPU
hipMalloc(&A_GPU, N*sizeof(float));
CUDAErrorCheck("hipMalloc A_GPU");
hipMalloc(&B_GPU, N*sizeof(float));
CUDAErrorCheck("hipMalloc B_GPU");
hipMalloc(&C_GPU, blocksPerGrid*sizeof(float));
CUDAErrorCheck("hipMalloc C_GPU");
//---main---memCpy host->dev
hipMemcpy(A_GPU, A_CPU, N*sizeof(float), hipMemcpyHostToDevice);
CUDAErrorCheck("A_CPU --> A_GPU cpy");
hipMemcpy(B_GPU, B_CPU, N*sizeof(float), hipMemcpyHostToDevice);
CUDAErrorCheck("B_CPU --> B_GPU cpy");
//---main---kernel exec
hipLaunchKernelGGL(( dotProd), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, A_GPU, B_GPU, C_GPU);
CUDAErrorCheck("dotProd kernel exec");
//---main---memCpy dev->host
hipMemcpy(C_CPU, C_GPU, blocksPerGrid*sizeof(float), hipMemcpyDeviceToHost);
CUDAErrorCheck("C_GPU --> C_CPU cpy");
//---main---sumErrytin
sumErrytin = 0.0;
for(int i=0; i<blocksPerGrid; i++){
sumErrytin += C_CPU[i];
}
printf("value: %f", sumErrytin);
//---main---free mem gpu
hipFree(A_GPU);
CUDAErrorCheck("freeing A_GPU");
hipFree(B_GPU);
CUDAErrorCheck("freeing B_GPU");
hipFree(C_GPU);
CUDAErrorCheck("freeing C_GPU");
//---main---free mem cpu
free(A_CPU);
CUDAErrorCheck("freeing A_CPU");
free(B_CPU);
CUDAErrorCheck("freeing B_CPU");
free(C_CPU);
CUDAErrorCheck("freeing C_CPU");
// Your code stops here.
return(0);
}
| d7f13b81a2146deed3b6cac0ac61aebaa39f25c4.cu | // To compile: nvcc HW4.cu -o temp; ./temp
#include <sys/time.h>
#include <stdio.h>
//---global vars---access for GPU
const int N = 2000;
// Querying the device at file scope does not compile, and the __shared__ array
// in the kernel needs a compile-time size, so a fixed block size is used here
// instead of prop.maxThreadsPerBlock (which is 1024 on current GPUs).
const int threadsPerBlock = 1024;
const int blocksPerGrid = ((N-1)/threadsPerBlock)+1;
//error check func for methods
void CUDAErrorCheck(const char *message)
{
cudaError_t error;
error = cudaGetLastError();
if(error != cudaSuccess)
{
printf("\n CUDA ERROR in: %s -> %s\n", message, cudaGetErrorString(error));
exit(0);
}
}
__global__ void dotProd(float *A_GPU, float *B_GPU, float *C_GPU){
//---dotProd---will give the thread ability to share mem on block
__shared__ float sh_mem[threadsPerBlock];
int tid = threadIdx.x + blockIdx.x*blockDim.x;
//---dotProd---limits at the nr_threads/Block
int sh_mem_id = threadIdx.x;
//---dotProd---store the product of id temporarily
float temp = 0.0;
while(tid < N){
temp += A_GPU[tid] * B_GPU[tid];
tid += blockDim.x * gridDim.x; //takes us to the id in the next block
}
//---dotProd---set mem val for that id
sh_mem[sh_mem_id] = temp;
__syncthreads();
//---dotProd---for any number vector
//change
int i = blockDim.x/2;
while(i != 0){
//will only execute if threadId within vector length
if(sh_mem_id < i){
//halfing the vector and adding the matching locations
sh_mem[sh_mem_id] += sh_mem[sh_mem_id + i];
__syncthreads();
}
i /= 2;
}
//---dotProd---inserts block sum into C_GPU to add later
if(sh_mem_id == 0)
C_GPU[blockIdx.x] = sh_mem[0];
}
int main()
{
long id;
float *A_CPU, *B_CPU, *C_CPU, sumErrytin; //Pointers for memory on the Host
// Your variables start here.
float *A_GPU, *B_GPU, *C_GPU;
// Your variables stop here.
A_CPU = (float*)malloc(N*sizeof(float));
B_CPU = (float*)malloc(N*sizeof(float));
C_CPU = (float*)malloc(N*sizeof(float));
for(id = 0; id < N; id++) {A_CPU[id] = 1; B_CPU[id] = 3;}
// Your code starts here.
//---main---mallocGPU
cudaMalloc(&A_GPU, N*sizeof(float));
CUDAErrorCheck("cudaMalloc A_GPU");
cudaMalloc(&B_GPU, N*sizeof(float));
CUDAErrorCheck("cudaMalloc B_GPU");
cudaMalloc(&C_GPU, blocksPerGrid*sizeof(float));
CUDAErrorCheck("cudaMalloc C_GPU");
//---main---memCpy host->dev
cudaMemcpy(A_GPU, A_CPU, N*sizeof(float), cudaMemcpyHostToDevice);
CUDAErrorCheck("A_CPU --> A_GPU cpy");
cudaMemcpy(B_GPU, B_CPU, N*sizeof(float), cudaMemcpyHostToDevice);
CUDAErrorCheck("B_CPU --> B_GPU cpy");
//---main---kernel exec
dotProd<<<blocksPerGrid, threadsPerBlock>>>(A_GPU, B_GPU, C_GPU);
CUDAErrorCheck("dotProd kernel exec");
//---main---memCpy dev->host
cudaMemcpy(C_CPU, C_GPU, blocksPerGrid*sizeof(float), cudaMemcpyDeviceToHost);
CUDAErrorCheck("C_GPU --> C_CPU cpy");
//---main---sumErrytin
sumErrytin = 0.0;
for(int i=0; i<blocksPerGrid; i++){
sumErrytin += C_CPU[i];
}
printf("value: %f", sumErrytin);
//---main---free mem gpu
cudaFree(A_GPU);
CUDAErrorCheck("freeing A_GPU");
cudaFree(B_GPU);
CUDAErrorCheck("freeing B_GPU");
cudaFree(C_GPU);
CUDAErrorCheck("freeing C_GPU");
//---main---free mem cpu
free(A_CPU);
CUDAErrorCheck("freeing A_CPU");
free(B_CPU);
CUDAErrorCheck("freeing B_CPU");
free(C_CPU);
CUDAErrorCheck("freeing C_CPU");
// Your code stops here.
return(0);
}
|
zbcsrlupivloc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@precisions normal z -> c d s
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
#define PRECISION_z
#define Ablockinfo(i,j) Ablockinfo[(i)*c_blocks + (j)]
#define Bblockinfo(i,j) Bblockinfo[(i)*c_blocks + (j)]
#define A(i,j) ((Ablockinfo(i,j)-1)*size_b*size_b)
#define B(i,j) ((Bblockinfo(i,j)-1)*size_b*size_b)
//============================================================
#define ldb m
#define lda m
#define ldc m
// every multiprocessor handles one BCSR-block
__global__ void
zbcsrlupivloc_kernel(
int size_b,
int kblocks,
double **A,
magma_int_t *ipiv)
{
if( blockIdx.x < kblocks ) {
if(threadIdx.x < size_b ){
for( int i=0; i<size_b; i++){
int dst = ipiv[i]-1;
if( dst != i ){
double *A1 = A[blockIdx.x]+threadIdx.x*size_b+i;
double *A2 = A[blockIdx.x]+threadIdx.x*size_b+dst;
double tmp = *A2;
*A2 = *A1;
*A1 = tmp;
}
}
}
}
}
/**
Purpose
-------
For a Block-CSR ILU factorization, this routine updates all blocks in
the trailing matrix.
Arguments
---------
@param[in]
size_b magma_int_t
blocksize in BCSR
@param[in]
kblocks magma_int_t
number of blocks
@param[in]
dA magmaDoubleComplex_ptr *
matrix in BCSR
@param[in]
ipiv magmaInt_ptr
array containing pivots
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C" magma_int_t
magma_zbcsrlupivloc(
magma_int_t size_b,
magma_int_t kblocks,
magmaDoubleComplex_ptr *dA,
magmaInt_ptr ipiv,
magma_queue_t queue )
{
#if defined(PRECISION_d)
dim3 threads( 64, 1 );
dim3 grid(kblocks, 1, 1);
hipLaunchKernelGGL(( zbcsrlupivloc_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
size_b, kblocks, dA, ipiv );
#endif
return MAGMA_SUCCESS;
}
| zbcsrlupivloc.cu | /*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@precisions normal z -> c d s
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
#define PRECISION_z
#define Ablockinfo(i,j) Ablockinfo[(i)*c_blocks + (j)]
#define Bblockinfo(i,j) Bblockinfo[(i)*c_blocks + (j)]
#define A(i,j) ((Ablockinfo(i,j)-1)*size_b*size_b)
#define B(i,j) ((Bblockinfo(i,j)-1)*size_b*size_b)
//============================================================
#define ldb m
#define lda m
#define ldc m
// every multiprocessor handles one BCSR-block
__global__ void
zbcsrlupivloc_kernel(
int size_b,
int kblocks,
double **A,
magma_int_t *ipiv)
{
if( blockIdx.x < kblocks ) {
if(threadIdx.x < size_b ){
for( int i=0; i<size_b; i++){
int dst = ipiv[i]-1;
if( dst != i ){
double *A1 = A[blockIdx.x]+threadIdx.x*size_b+i;
double *A2 = A[blockIdx.x]+threadIdx.x*size_b+dst;
double tmp = *A2;
*A2 = *A1;
*A1 = tmp;
}
}
}
}
}
/**
Purpose
-------
For a Block-CSR ILU factorization, this routine updates all blocks in
the trailing matrix.
Arguments
---------
@param[in]
size_b magma_int_t
blocksize in BCSR
@param[in]
kblocks magma_int_t
number of blocks
@param[in]
dA magmaDoubleComplex_ptr *
matrix in BCSR
@param[in]
ipiv magmaInt_ptr
array containing pivots
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C" magma_int_t
magma_zbcsrlupivloc(
magma_int_t size_b,
magma_int_t kblocks,
magmaDoubleComplex_ptr *dA,
magmaInt_ptr ipiv,
magma_queue_t queue )
{
#if defined(PRECISION_d)
dim3 threads( 64, 1 );
dim3 grid(kblocks, 1, 1);
zbcsrlupivloc_kernel<<< grid, threads, 0, queue->cuda_stream() >>>(
size_b, kblocks, dA, ipiv );
#endif
return MAGMA_SUCCESS;
}
|
b67bc40206d0d74d2f1090235b6a5f51b821fafe.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <wb.h>
#define BLOCK_SIZE 256
#define wbCheck(stmt) \
do { \
hipError_t err = stmt; \
if (err != hipSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
wbLog(ERROR, "Got CUDA error ... ", hipGetErrorString(err)); \
return -1; \
} \
} while (0)
__global__ void spmvJDSKernel(float *out, int *matColStart, int *matCols,
int *matRowPerm, int *matRows,
float *matData, float *vec, int dim) {
//@@ insert spmv kernel for jds format
int row = threadIdx.x + blockDim.x * blockIdx.x;
int sec = 0;
float result = 0;
if(row < dim){
for(sec = 0;sec < matRows[row];sec++)
result += matData[matColStart[sec] + row] * vec[matCols[matColStart[sec] + row]];
out[matRowPerm[row]] = result;
}
}
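// Illustrative note on the JDS layout the kernel above assumes (CSRToJDS
// itself is defined elsewhere): rows are sorted by decreasing non-zero count,
// matRowPerm maps a sorted position back to the original row id, and the
// non-zeros are stored one "jagged diagonal" at a time, so
// matData[matColStart[sec] + row] is the sec-th stored non-zero of the row at
// sorted position `row`. Example: a 3x3 matrix whose rows hold 3, 1 and 2
// non-zeros gives the sorted order {0, 2, 1}, matColStart = {0, 3, 5}, and
// matData = [r0#1, r2#1, r1#1, r0#2, r2#2, r0#3] (rN#k = k-th non-zero of row N).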
static void spmvJDS(float *out, int *matColStart, int *matCols,
int *matRowPerm, int *matRows, float *matData,
float *vec, int dim) {
//@@ invoke spmv kernel for jds format
hipLaunchKernelGGL(( spmvJDSKernel), dim3(ceil(dim * 1.0 / BLOCK_SIZE)), dim3(BLOCK_SIZE), 0, 0, out, matColStart, matCols, matRowPerm, matRows, matData, vec, dim);
}
int main(int argc, char **argv) {
wbArg_t args;
int *hostCSRCols;
int *hostCSRRows;
float *hostCSRData;
int *hostJDSColStart; // Indicate the start index of each column
int *hostJDSCols; // Indicate the column of each element
int *hostJDSRowPerm; // Record the row number before permutation
  int *hostJDSRows;      // Indicates the length of each row (after permutation); it can be calculated using hostJDSColStart
float *hostJDSData; // Mat
float *hostVector; // X
float *hostOutput; // Y
int *deviceJDSColStart;
int *deviceJDSCols;
int *deviceJDSRowPerm;
int *deviceJDSRows;
float *deviceJDSData;
float *deviceVector;
float *deviceOutput;
int dim, ncols, nrows, ndata;
int maxRowNNZ;
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostCSRCols = (int *)wbImport(wbArg_getInputFile(args, 0), &ncols, "Integer");
hostCSRRows = (int *)wbImport(wbArg_getInputFile(args, 1), &nrows, "Integer");
hostCSRData = (float *)wbImport(wbArg_getInputFile(args, 2), &ndata, "Real");
hostVector = (float *)wbImport(wbArg_getInputFile(args, 3), &dim, "Real");
hostOutput = (float *)malloc(sizeof(float) * dim);
wbTime_stop(Generic, "Importing data and creating memory on host");
CSRToJDS(dim, hostCSRRows, hostCSRCols, hostCSRData, &hostJDSRowPerm, &hostJDSRows,
&hostJDSColStart, &hostJDSCols, &hostJDSData);
maxRowNNZ = hostJDSRows[0];
wbTime_start(GPU, "Allocating GPU memory.");
hipMalloc((void **)&deviceJDSColStart, sizeof(int) * maxRowNNZ);
hipMalloc((void **)&deviceJDSCols, sizeof(int) * ndata);
hipMalloc((void **)&deviceJDSRowPerm, sizeof(int) * dim);
hipMalloc((void **)&deviceJDSRows, sizeof(int) * dim);
hipMalloc((void **)&deviceJDSData, sizeof(float) * ndata);
hipMalloc((void **)&deviceVector, sizeof(float) * dim);
hipMalloc((void **)&deviceOutput, sizeof(float) * dim);
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
hipMemcpy(deviceJDSColStart, hostJDSColStart, sizeof(int) * maxRowNNZ,
hipMemcpyHostToDevice);
hipMemcpy(deviceJDSCols, hostJDSCols, sizeof(int) * ndata, hipMemcpyHostToDevice);
hipMemcpy(deviceJDSRowPerm, hostJDSRowPerm, sizeof(int) * dim, hipMemcpyHostToDevice);
hipMemcpy(deviceJDSRows, hostJDSRows, sizeof(int) * dim, hipMemcpyHostToDevice);
hipMemcpy(deviceJDSData, hostJDSData, sizeof(float) * ndata, hipMemcpyHostToDevice);
hipMemcpy(deviceVector, hostVector, sizeof(float) * dim, hipMemcpyHostToDevice);
wbTime_stop(GPU, "Copying input memory to the GPU.");
wbTime_start(Compute, "Performing CUDA computation");
spmvJDS(deviceOutput, deviceJDSColStart, deviceJDSCols, deviceJDSRowPerm, deviceJDSRows,
deviceJDSData, deviceVector, dim);
hipDeviceSynchronize();
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
hipMemcpy(hostOutput, deviceOutput, sizeof(float) * dim, hipMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying output memory to the CPU");
wbTime_start(GPU, "Freeing GPU Memory");
hipFree(deviceVector);
hipFree(deviceOutput);
hipFree(deviceJDSColStart);
hipFree(deviceJDSCols);
hipFree(deviceJDSRowPerm);
hipFree(deviceJDSRows);
hipFree(deviceJDSData);
wbTime_stop(GPU, "Freeing GPU Memory");
wbSolution(args, hostOutput, dim);
free(hostCSRCols);
free(hostCSRRows);
free(hostCSRData);
free(hostVector);
free(hostOutput);
free(hostJDSColStart);
free(hostJDSCols);
free(hostJDSRowPerm);
free(hostJDSRows);
free(hostJDSData);
return 0;
}
| b67bc40206d0d74d2f1090235b6a5f51b821fafe.cu | #include <wb.h>
#define BLOCK_SIZE 256
#define wbCheck(stmt) \
do { \
cudaError_t err = stmt; \
if (err != cudaSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
wbLog(ERROR, "Got CUDA error ... ", cudaGetErrorString(err)); \
return -1; \
} \
} while (0)
__global__ void spmvJDSKernel(float *out, int *matColStart, int *matCols,
int *matRowPerm, int *matRows,
float *matData, float *vec, int dim) {
//@@ insert spmv kernel for jds format
int row = threadIdx.x + blockDim.x * blockIdx.x;
int sec = 0;
float result = 0;
if(row < dim){
for(sec = 0;sec < matRows[row];sec++)
result += matData[matColStart[sec] + row] * vec[matCols[matColStart[sec] + row]];
out[matRowPerm[row]] = result;
}
}
static void spmvJDS(float *out, int *matColStart, int *matCols,
int *matRowPerm, int *matRows, float *matData,
float *vec, int dim) {
//@@ invoke spmv kernel for jds format
spmvJDSKernel<<<ceil(dim * 1.0 / BLOCK_SIZE), BLOCK_SIZE>>>(out, matColStart, matCols, matRowPerm, matRows, matData, vec, dim);
}
int main(int argc, char **argv) {
wbArg_t args;
int *hostCSRCols;
int *hostCSRRows;
float *hostCSRData;
int *hostJDSColStart; // Indicate the start index of each column
int *hostJDSCols; // Indicate the column of each element
int *hostJDSRowPerm; // Record the row number before permutation
  int *hostJDSRows;      // Indicates the length of each row (after permutation); it can be calculated using hostJDSColStart
float *hostJDSData; // Mat
float *hostVector; // X
float *hostOutput; // Y
int *deviceJDSColStart;
int *deviceJDSCols;
int *deviceJDSRowPerm;
int *deviceJDSRows;
float *deviceJDSData;
float *deviceVector;
float *deviceOutput;
int dim, ncols, nrows, ndata;
int maxRowNNZ;
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostCSRCols = (int *)wbImport(wbArg_getInputFile(args, 0), &ncols, "Integer");
hostCSRRows = (int *)wbImport(wbArg_getInputFile(args, 1), &nrows, "Integer");
hostCSRData = (float *)wbImport(wbArg_getInputFile(args, 2), &ndata, "Real");
hostVector = (float *)wbImport(wbArg_getInputFile(args, 3), &dim, "Real");
hostOutput = (float *)malloc(sizeof(float) * dim);
wbTime_stop(Generic, "Importing data and creating memory on host");
CSRToJDS(dim, hostCSRRows, hostCSRCols, hostCSRData, &hostJDSRowPerm, &hostJDSRows,
&hostJDSColStart, &hostJDSCols, &hostJDSData);
maxRowNNZ = hostJDSRows[0];
wbTime_start(GPU, "Allocating GPU memory.");
cudaMalloc((void **)&deviceJDSColStart, sizeof(int) * maxRowNNZ);
cudaMalloc((void **)&deviceJDSCols, sizeof(int) * ndata);
cudaMalloc((void **)&deviceJDSRowPerm, sizeof(int) * dim);
cudaMalloc((void **)&deviceJDSRows, sizeof(int) * dim);
cudaMalloc((void **)&deviceJDSData, sizeof(float) * ndata);
cudaMalloc((void **)&deviceVector, sizeof(float) * dim);
cudaMalloc((void **)&deviceOutput, sizeof(float) * dim);
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
cudaMemcpy(deviceJDSColStart, hostJDSColStart, sizeof(int) * maxRowNNZ,
cudaMemcpyHostToDevice);
cudaMemcpy(deviceJDSCols, hostJDSCols, sizeof(int) * ndata, cudaMemcpyHostToDevice);
cudaMemcpy(deviceJDSRowPerm, hostJDSRowPerm, sizeof(int) * dim, cudaMemcpyHostToDevice);
cudaMemcpy(deviceJDSRows, hostJDSRows, sizeof(int) * dim, cudaMemcpyHostToDevice);
cudaMemcpy(deviceJDSData, hostJDSData, sizeof(float) * ndata, cudaMemcpyHostToDevice);
cudaMemcpy(deviceVector, hostVector, sizeof(float) * dim, cudaMemcpyHostToDevice);
wbTime_stop(GPU, "Copying input memory to the GPU.");
wbTime_start(Compute, "Performing CUDA computation");
spmvJDS(deviceOutput, deviceJDSColStart, deviceJDSCols, deviceJDSRowPerm, deviceJDSRows,
deviceJDSData, deviceVector, dim);
cudaDeviceSynchronize();
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
cudaMemcpy(hostOutput, deviceOutput, sizeof(float) * dim, cudaMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying output memory to the CPU");
wbTime_start(GPU, "Freeing GPU Memory");
cudaFree(deviceVector);
cudaFree(deviceOutput);
cudaFree(deviceJDSColStart);
cudaFree(deviceJDSCols);
cudaFree(deviceJDSRowPerm);
cudaFree(deviceJDSRows);
cudaFree(deviceJDSData);
wbTime_stop(GPU, "Freeing GPU Memory");
wbSolution(args, hostOutput, dim);
free(hostCSRCols);
free(hostCSRRows);
free(hostCSRData);
free(hostVector);
free(hostOutput);
free(hostJDSColStart);
free(hostJDSCols);
free(hostJDSRowPerm);
free(hostJDSRows);
free(hostJDSData);
return 0;
}
|
90661abf78f228541c7ace07e7b46522c12e5d02.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "wb.h"
#define wbCheck(stmt)\
do {\
hipError_t err = stmt; \
if (err != hipSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt);\
wbLog(ERROR, "Got CUDA error ... ", hipGetErrorString(err));\
return -1;\
}\
} while (0)
#define CHANNELS 3
__global__ void colorConvert(float * grayImg, float * rgbImg, int width, int height);
//@@ INSERT CODE HERE
int main(int argc, char *argv[]) {
wbArg_t args;
int imageChannels;
int imageWidth;
int imageHeight;
char *inputImageFile;
wbImage_t inputImage;
wbImage_t outputImage;
float *hostInputImageData;
float *hostOutputImageData;
float *deviceInputImageData;
float *deviceOutputImageData;
args = wbArg_read(argc, argv); /* parse the input arguments */
inputImageFile = wbArg_getInputFile(args, 0);
inputImage = wbImport(inputImageFile);
imageWidth = wbImage_getWidth(inputImage);
imageHeight = wbImage_getHeight(inputImage);
// For this lab the value is always 3
imageChannels = wbImage_getChannels(inputImage);
// Since the image is monochromatic, it only contains one channel
outputImage = wbImage_new(imageWidth, imageHeight, 1);
hostInputImageData = wbImage_getData(inputImage);
hostOutputImageData = wbImage_getData(outputImage);
wbTime_start(GPU, "Doing GPU Computation (memory + compute)");
wbTime_start(GPU, "Doing GPU memory allocation");
hipMalloc((void **)&deviceInputImageData,imageWidth * imageHeight * imageChannels * sizeof(float));
hipMalloc((void **)&deviceOutputImageData,imageWidth * imageHeight * sizeof(float));
wbTime_stop(GPU, "Doing GPU memory allocation");
wbTime_start(Copy, "Copying data to the GPU");
hipMemcpy(deviceInputImageData, hostInputImageData,imageWidth * imageHeight * imageChannels * sizeof(float),hipMemcpyHostToDevice);
wbTime_stop(Copy, "Copying data to the GPU");
///////////////////////////////////////////////////////
wbTime_start(Compute, "Doing the computation on the GPU");
//@@ INSERT CODE HERE to convert colored image to gray
  dim3 DimGrid( (imageWidth - 1)/16 + 1, (imageHeight - 1)/16 + 1, 1 ); // round up so every pixel gets a thread
dim3 DimBlock(16,16,1);
hipLaunchKernelGGL(( colorConvert) , dim3(DimGrid), dim3(DimBlock) , 0, 0, deviceOutputImageData, deviceInputImageData, imageWidth, imageHeight);
wbTime_stop(Compute, "Doing the computation on the GPU");
///////////////////////////////////////////////////////
wbTime_start(Copy, "Copying data from the GPU");
hipMemcpy(hostOutputImageData, deviceOutputImageData,
imageWidth * imageHeight * sizeof(float),
hipMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying data from the GPU");
wbTime_stop(GPU, "Doing GPU Computation (memory + compute)");
unsigned char grayScale[imageHeight][imageWidth];
int i, j;
for (j = 0; j < imageHeight; ++j) {
for (i = 0; i < imageWidth; ++i) {
grayScale[j][i] = ceil(hostOutputImageData[i + j * imageWidth] * 255.0);
}
}
FILE *fp = fopen("grayImageFile.ppm", "wb"); /* b - binary mode */
fprintf(fp, "P5\n%d %d\n255\n", imageWidth, imageHeight);
fwrite(grayScale, sizeof(grayScale), 1, fp);
fclose(fp);
hipFree(deviceInputImageData);
hipFree(deviceOutputImageData);
wbImage_delete(outputImage);
wbImage_delete(inputImage);
return 0;
}
// define colorConvert
__global__ void colorConvert(float * grayImg, float * rgbImg, int width, int height){
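  // One thread per output pixel: x indexes columns (width), y indexes rows
  // (height); the interleaved RGB pixel starts at CHANNELS * grayOffset.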
int x = threadIdx.x + blockDim.x * blockIdx.x ;
int y = threadIdx.y + blockDim.y * blockIdx.y ;
  if(x < width && y < height){
int grayOffset = y*width + x;
int rgbOffset = grayOffset*CHANNELS;
    float r = rgbImg[rgbOffset];      // red
    float g = rgbImg[rgbOffset + 1];  // green
    float b = rgbImg[rgbOffset + 2];  // blue
grayImg[grayOffset]= 0.21f*r + 0.71f*g + 0.07f*b;
}
}
| 90661abf78f228541c7ace07e7b46522c12e5d02.cu | #include "wb.h"
#define wbCheck(stmt)\
do {\
cudaError_t err = stmt; \
if (err != cudaSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt);\
wbLog(ERROR, "Got CUDA error ... ", cudaGetErrorString(err));\
return -1;\
}\
} while (0)
#define CHANNELS 3
__global__ void colorConvert(float * grayImg, float * rgbImg, int width, int height);
//@@ INSERT CODE HERE
int main(int argc, char *argv[]) {
wbArg_t args;
int imageChannels;
int imageWidth;
int imageHeight;
char *inputImageFile;
wbImage_t inputImage;
wbImage_t outputImage;
float *hostInputImageData;
float *hostOutputImageData;
float *deviceInputImageData;
float *deviceOutputImageData;
args = wbArg_read(argc, argv); /* parse the input arguments */
inputImageFile = wbArg_getInputFile(args, 0);
inputImage = wbImport(inputImageFile);
imageWidth = wbImage_getWidth(inputImage);
imageHeight = wbImage_getHeight(inputImage);
// For this lab the value is always 3
imageChannels = wbImage_getChannels(inputImage);
// Since the image is monochromatic, it only contains one channel
outputImage = wbImage_new(imageWidth, imageHeight, 1);
hostInputImageData = wbImage_getData(inputImage);
hostOutputImageData = wbImage_getData(outputImage);
wbTime_start(GPU, "Doing GPU Computation (memory + compute)");
wbTime_start(GPU, "Doing GPU memory allocation");
cudaMalloc((void **)&deviceInputImageData,imageWidth * imageHeight * imageChannels * sizeof(float));
cudaMalloc((void **)&deviceOutputImageData,imageWidth * imageHeight * sizeof(float));
wbTime_stop(GPU, "Doing GPU memory allocation");
wbTime_start(Copy, "Copying data to the GPU");
cudaMemcpy(deviceInputImageData, hostInputImageData,imageWidth * imageHeight * imageChannels * sizeof(float),cudaMemcpyHostToDevice);
wbTime_stop(Copy, "Copying data to the GPU");
///////////////////////////////////////////////////////
wbTime_start(Compute, "Doing the computation on the GPU");
//@@ INSERT CODE HERE to convert colored image to gray
  dim3 DimGrid( (imageWidth - 1)/16 + 1, (imageHeight - 1)/16 + 1, 1 ); // round up so every pixel gets a thread
dim3 DimBlock(16,16,1);
colorConvert <<< DimGrid, DimBlock >>> (deviceOutputImageData, deviceInputImageData, imageWidth, imageHeight);
wbTime_stop(Compute, "Doing the computation on the GPU");
///////////////////////////////////////////////////////
wbTime_start(Copy, "Copying data from the GPU");
cudaMemcpy(hostOutputImageData, deviceOutputImageData,
imageWidth * imageHeight * sizeof(float),
cudaMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying data from the GPU");
wbTime_stop(GPU, "Doing GPU Computation (memory + compute)");
unsigned char grayScale[imageHeight][imageWidth];
int i, j;
for (j = 0; j < imageHeight; ++j) {
for (i = 0; i < imageWidth; ++i) {
grayScale[j][i] = ceil(hostOutputImageData[i + j * imageWidth] * 255.0);
}
}
FILE *fp = fopen("grayImageFile.ppm", "wb"); /* b - binary mode */
fprintf(fp, "P5\n%d %d\n255\n", imageWidth, imageHeight);
fwrite(grayScale, sizeof(grayScale), 1, fp);
fclose(fp);
cudaFree(deviceInputImageData);
cudaFree(deviceOutputImageData);
wbImage_delete(outputImage);
wbImage_delete(inputImage);
return 0;
}
// define colorConvert
__global__ void colorConvert(float * grayImg, float * rgbImg, int width, int height){
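  // One thread per output pixel: x indexes columns (width), y indexes rows
  // (height); the interleaved RGB pixel starts at CHANNELS * grayOffset.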
int x = threadIdx.x + blockDim.x * blockIdx.x ;
int y = threadIdx.y + blockDim.y * blockIdx.y ;
  if(x < width && y < height){
int grayOffset = y*width + x;
int rgbOffset = grayOffset*CHANNELS;
    float r = rgbImg[rgbOffset];      // red
    float g = rgbImg[rgbOffset + 1];  // green
    float b = rgbImg[rgbOffset + 2];  // blue
grayImg[grayOffset]= 0.21f*r + 0.71f*g + 0.07f*b;
}
}
|
d6381ccdc2345ab8be18263245d5a282a25c7a99.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void NesterovUpdate(int N, Dtype* g, Dtype* h,
Dtype momentum, Dtype local_rate) {
CUDA_KERNEL_LOOP(i, N) {
//float hi = h[i];
//float hi_new = h[i] = momentum * hi + local_rate * g[i];
//g[i] = (1+momentum) * hi_new - momentum * hi;
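    // h accumulates the velocity (momentum * h + grad); g is overwritten with
    // the scaled step local_rate * (momentum * h_new + grad).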
h[i] = momentum*h[i] + g[i];
g[i] = local_rate*momentum*h[i] + local_rate*g[i];
}
}
template <typename Dtype>
void nesterov_update_gpu(int N, Dtype* g, Dtype* h, Dtype momentum,
Dtype local_rate) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(NesterovUpdate<Dtype>, dim3(CAFFE_GET_BLOCKS(N)),
      dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, g, h, momentum, local_rate);
CUDA_POST_KERNEL_CHECK;
}
template void nesterov_update_gpu<float>(int, float*, float*, float, float);
template void nesterov_update_gpu<double>(int, double*, double*, double,
double);
} // namespace caffe
| d6381ccdc2345ab8be18263245d5a282a25c7a99.cu | #include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void NesterovUpdate(int N, Dtype* g, Dtype* h,
Dtype momentum, Dtype local_rate) {
CUDA_KERNEL_LOOP(i, N) {
//float hi = h[i];
//float hi_new = h[i] = momentum * hi + local_rate * g[i];
//g[i] = (1+momentum) * hi_new - momentum * hi;
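    // h accumulates the velocity (momentum * h + grad); g is overwritten with
    // the scaled step local_rate * (momentum * h_new + grad).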
h[i] = momentum*h[i] + g[i];
g[i] = local_rate*momentum*h[i] + local_rate*g[i];
}
}
template <typename Dtype>
void nesterov_update_gpu(int N, Dtype* g, Dtype* h, Dtype momentum,
Dtype local_rate) {
NesterovUpdate<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, g, h, momentum, local_rate);
CUDA_POST_KERNEL_CHECK;
}
template void nesterov_update_gpu<float>(int, float*, float*, float, float);
template void nesterov_update_gpu<double>(int, double*, double*, double,
double);
} // namespace caffe
|
84c0b4d89845f65a12da045de049d6c07541d38d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Conway's Game of Life CUDA implementation in C, using only global memory, with enough threads to process every cell in the grid.
Papavasileiou Themis 24/01/2015
*/
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <sys/time.h>
#include <unistd.h>
struct timeval startwtime, endwtime;
double seq_time;
void die(const char *message){
if(errno){
perror(message);
}else{
printf("Error: %s\n", message);
}
exit(1);
}
void warn(const char *message){
if(errno){
perror(message);
}else{
printf("Error: %s\n", message);
}
return;
}
void write_to_file( int *X, char *filename, int N)
{
FILE *fp;
char newfilename[100];
sprintf(newfilename,"GameOfLifecuda%dX%d.bin",N,N);
if ( !(fp=fopen(newfilename,"w+")))
{
die(" couldn't open file");
}
if( ! fwrite(X,sizeof(int),N*N,fp))
{
die("couldn't really write");
}
fclose(fp);
}
void read_from_file( int *X, char *filename, int N)
{
FILE *fp = fopen(filename,"r+");
int size = fread(X,sizeof(int),N*N,fp);
//printf("elements: %d\n",size);
fclose(fp);
}
/*
The logic is the same as in the serial version, but modified to fit the CUDA implementation needs.
*/
__global__
void evolve(int *table, int N,int *new_table)
{
//Global memory indices.
int i = (blockIdx.x*blockDim.x)+threadIdx.x;
int j = (blockIdx.y*blockDim.y)+threadIdx.y;
int k=0,sum=0;
// if the above indices are in bounds
if(i<N && j<N)
{
sum=0;
if( (i!=0 && j!=0) && (i!=N-1 && j!=N-1) )
{
for( k=0 ; k<3 ; ++k)
{
sum+= table[ (i-1)*N + (j-1+k) ]+
table[ (i+1)*N + (j-1+k)]+
table[ (i*N) + (j-1+k) ];
}
sum-= table[i*N+j];
}
else
{
//printf("edge neighbors of %d , %d are ->\n",i,j);
for(k=0 ; k<3; ++k)
{
sum+= table[ (i*N) + (j-1+k<0 ? N-1 : j-1+k>N-1 ? 0 : j-1+k ) ]+
table[ (i-1<0?N-1:i-1)*N + ( j-1+k<0 ? N-1 : j-1+k>N-1 ? 0 : j-1+k ) ]+
table[ (i+1>N-1?0:i+1)*N + ( j-1+k<0 ? N-1 : j-1+k>N-1 ? 0 : j-1+k ) ];
}
sum-= table[i*N+j]; //subtract self-value because it's counted as a neighbor.
}
if( table[i*N+j] == 0 && sum == 3 )
{
new_table[i*N+j]=1;
}
else if( table[i*N+j] == 1 && ( sum<2 || sum>3 ) )
{
new_table[i*N+j]=0;
}
else
{
new_table[i*N+j]=table[i*N+j];
}
}
return;
}
int main (int argc, char **argv)
{
//Necessary variables and float to get the time it took the GPU to compute the evolve() function.
float gputime;
char *filename=argv[1];
int N = atoi(argv[2]);
int generations=atoi(argv[3]);
//Find the nearest power of 2, to our grid size N.
int nearestPower =0;
while(true)
{
if( 1<<nearestPower < N )
{
nearestPower++;
continue;
}
break;
}
int t = atoi(argv[4]);
//user defines threads/block number
dim3 threadsPerBlock(t,t);
//Make sure we have enough threads to cover all our cells.
dim3 numBlocks((1<<nearestPower)/threadsPerBlock.x,(1<<nearestPower)/threadsPerBlock.y);
int *table=(int *)malloc(N*N*sizeof(int));
read_from_file(table, filename , N);
// Tables for our GPU.
int *new_table;
hipMalloc(&new_table,N*N*sizeof(int));
int *device_table;
hipMalloc(&device_table,N*N*sizeof(int));
int i;
hipMemcpy(device_table,table,N*N*sizeof(int),hipMemcpyHostToDevice);
//print our arguments in whatever way fits our parser for the preprocessing of report data :).
printf("%d %d \n",N,t);
printf("\n");
for(i=0;i<generations;++i)
{
/*
Some implementations use a third temp array in order to feed it to the next iteration of the Game Of Life function.
Here we save space by juggling the new_table, and device_table interchangeably, as arguments to the function.
*/
  //make sure the GPU execution is timed. (Timing pattern pulled from the NVIDIA forums.)
hipEvent_t start,stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start,0);
//The actual juggling.
if(i%2==0)
{
hipLaunchKernelGGL(( evolve), dim3(numBlocks),dim3(threadsPerBlock), 0, 0, device_table,N,new_table);
}
else
{
hipLaunchKernelGGL(( evolve), dim3(numBlocks),dim3(threadsPerBlock), 0, 0, new_table,N,device_table);
}
hipDeviceSynchronize();
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&gputime,start,stop);
hipEventDestroy(start);
hipEventDestroy(stop) ;
printf("%g \n",gputime/1000.0f);
}
/*
Now, depending if the number of generations was even or odd, we must get the correct array of values.
*/
if(generations%2==1)
{
hipMemcpy(table,new_table,N*N*sizeof(int),hipMemcpyDeviceToHost);
}
else
{
hipMemcpy(table,device_table,N*N*sizeof(int),hipMemcpyDeviceToHost);
}
//Write it back to file!
write_to_file(table,filename,N);
//No memory left behind, on any device!
hipFree(new_table);
hipFree(device_table);
free(table);
}
| 84c0b4d89845f65a12da045de049d6c07541d38d.cu | /*
Conway's Game of Life CUDA implementation in C, using only global memory, with enough threads to process every cell in the grid.
Papavasileiou Themis 24/01/2015
*/
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <sys/time.h>
#include <unistd.h>
struct timeval startwtime, endwtime;
double seq_time;
void die(const char *message){
if(errno){
perror(message);
}else{
printf("Error: %s\n", message);
}
exit(1);
}
void warn(const char *message){
if(errno){
perror(message);
}else{
printf("Error: %s\n", message);
}
return;
}
void write_to_file( int *X, char *filename, int N)
{
FILE *fp;
char newfilename[100];
sprintf(newfilename,"GameOfLifecuda%dX%d.bin",N,N);
if ( !(fp=fopen(newfilename,"w+")))
{
die(" couldn't open file");
}
if( ! fwrite(X,sizeof(int),N*N,fp))
{
die("couldn't really write");
}
fclose(fp);
}
void read_from_file( int *X, char *filename, int N)
{
FILE *fp = fopen(filename,"r+");
int size = fread(X,sizeof(int),N*N,fp);
//printf("elements: %d\n",size);
fclose(fp);
}
/*
The logic is the same as in the serial version, but modified to fit the CUDA implementation needs.
*/
__global__
void evolve(int *table, int N,int *new_table)
{
//Global memory indices.
int i = (blockIdx.x*blockDim.x)+threadIdx.x;
int j = (blockIdx.y*blockDim.y)+threadIdx.y;
int k=0,sum=0;
// if the above indices are in bounds
if(i<N && j<N)
{
sum=0;
if( (i!=0 && j!=0) && (i!=N-1 && j!=N-1) )
{
for( k=0 ; k<3 ; ++k)
{
sum+= table[ (i-1)*N + (j-1+k) ]+
table[ (i+1)*N + (j-1+k)]+
table[ (i*N) + (j-1+k) ];
}
sum-= table[i*N+j];
}
else
{
//printf("edge neighbors of %d , %d are ->\n",i,j);
for(k=0 ; k<3; ++k)
{
sum+= table[ (i*N) + (j-1+k<0 ? N-1 : j-1+k>N-1 ? 0 : j-1+k ) ]+
table[ (i-1<0?N-1:i-1)*N + ( j-1+k<0 ? N-1 : j-1+k>N-1 ? 0 : j-1+k ) ]+
table[ (i+1>N-1?0:i+1)*N + ( j-1+k<0 ? N-1 : j-1+k>N-1 ? 0 : j-1+k ) ];
}
sum-= table[i*N+j]; //subtract self-value because it's counted as a neighbor.
}
if( table[i*N+j] == 0 && sum == 3 )
{
new_table[i*N+j]=1;
}
else if( table[i*N+j] == 1 && ( sum<2 || sum>3 ) )
{
new_table[i*N+j]=0;
}
else
{
new_table[i*N+j]=table[i*N+j];
}
}
return;
}
int main (int argc, char **argv)
{
//Necessary variables and float to get the time it took the GPU to compute the evolve() function.
float gputime;
char *filename=argv[1];
int N = atoi(argv[2]);
int generations=atoi(argv[3]);
//Find the nearest power of 2, to our grid size N.
int nearestPower =0;
while(true)
{
if( 1<<nearestPower < N )
{
nearestPower++;
continue;
}
break;
}
int t = atoi(argv[4]);
//user defines threads/block number
dim3 threadsPerBlock(t,t);
//Make sure we have enough threads to cover all our cells.
dim3 numBlocks((1<<nearestPower)/threadsPerBlock.x,(1<<nearestPower)/threadsPerBlock.y);
int *table=(int *)malloc(N*N*sizeof(int));
read_from_file(table, filename , N);
// Tables for our GPU.
int *new_table;
cudaMalloc(&new_table,N*N*sizeof(int));
int *device_table;
cudaMalloc(&device_table,N*N*sizeof(int));
int i;
cudaMemcpy(device_table,table,N*N*sizeof(int),cudaMemcpyHostToDevice);
//print our arguments in whatever way fits our parser for the preprocessing of report data :).
printf("%d %d \n",N,t);
printf("\n");
for(i=0;i<generations;++i)
{
/*
Some implementations use a third temp array in order to feed it to the next iteration of the Game Of Life function.
Here we save space by juggling the new_table, and device_table interchangeably, as arguments to the function.
*/
  //make sure the GPU execution is timed. (Timing pattern pulled from the NVIDIA forums.)
cudaEvent_t start,stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
//The actual juggling.
if(i%2==0)
{
evolve<<<numBlocks,threadsPerBlock>>>(device_table,N,new_table);
}
else
{
evolve<<<numBlocks,threadsPerBlock>>>(new_table,N,device_table);
}
cudaDeviceSynchronize();
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&gputime,start,stop);
cudaEventDestroy(start);
cudaEventDestroy(stop) ;
printf("%g \n",gputime/1000.0f);
}
/*
Now, depending if the number of generations was even or odd, we must get the correct array of values.
*/
if(generations%2==1)
{
cudaMemcpy(table,new_table,N*N*sizeof(int),cudaMemcpyDeviceToHost);
}
else
{
cudaMemcpy(table,device_table,N*N*sizeof(int),cudaMemcpyDeviceToHost);
}
//Write it back to file!
write_to_file(table,filename,N);
//No memory left behind, on any device!
cudaFree(new_table);
cudaFree(device_table);
free(table);
}
|
228c9ea2fd07261ff4d6a8f9e2e163d0be0307da.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*-----------------------------------------------------------------------------
File: Median_Test.cpp
Desc: Runs Median Test
-----------------------------------------------------------------------------*/
/*-------------------------------------
Includes
-------------------------------------*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, CUDA
#include <cutil_inline.h>
// includes, project
#include "KD_API.h"
/*-------------------------------------
Global Variables
-------------------------------------*/
extern AppGlobals g_app;
/*-------------------------------------
CUDA Kernels
-------------------------------------*/
//#include <Median_GPU.cu>
/*-------------------------------------
Function Declarations
-------------------------------------*/
bool RunMedianTest();
/*---------------------------------------------------------
Name: RunMedianTest()
Desc: Run a simple test of "Median Partition"
functionality on CUDA GPU framework
---------------------------------------------------------*/
bool RunMedianTest()
{
bool bResult = false;
#if 0
/*---------------------------------
Step 0. Initialize Cuda
---------------------------------*/
hipError_t cuda_err = hipSuccess;
// set seed for rand()
srand( 2009 );
g_app.hTimer = 0;
sdkCreateTimer( &(g_app.hTimer) );
/*-------------------------------------------
Step 1. Setup Initial parameters
-------------------------------------------*/
// Hard Coded for now...
g_app.bgShape.nElems = g_app.nSearch;
g_app.bgShape.threadsPerRow = MEDIAN_THREADS_PER_ROW;
g_app.bgShape.rowsPerBlock = MEDIAN_ROWS_PER_BLOCK;
bResult = ComputeBlockShapeFromVector( g_app.bgShape );
if (false == bResult)
{
// Error
return false;
}
  // Make sure Matrix + vector are not too big to fit in available device memory // 768 Meg on Display Card
int sizePoints = g_app.bgShape.nPadded * sizeof(float4);
int sizeDists = g_app.bgShape.nPadded * sizeof(float2);
int totalMem = sizePoints + (2*sizeDists);
// Make sure memory required to perform this operation doesn't exceed display device memory
if (totalMem >= g_app.cudaProps.totalGlobalMem)
{
// Error - not enough memory to perform operation
printf( "Matrix + Vector are too large for available device memory, running test will crash..." );
return false;
}
// Setup GPU Kernel execution parameters
// Median Sort Kernel
dim3 dimBlock( g_app.bgShape.threadsPerRow, g_app.bgShape.rowsPerBlock, 1 );
dim3 dimGrid( g_app.bgShape.blocksPerRow, g_app.bgShape.rowsPerGrid, 1 );
/*-------------------------------------------
Step 2. Allocate Vectors
-------------------------------------------*/
int nOrig = g_app.bgShape.nElems;
int nPad = g_app.bgShape.nPadded;
int w = g_app.bgShape.W;
int h = g_app.bgShape.H;
/*-----------------------
Host Memory
-----------------------*/
// allocate host memory for original points (before median sort)
int mem_size_Points = nPad * sizeof(float4);
float4* h_Points_Orig = (float4*) malloc( (size_t)mem_size_Points );
// allocate host memory for point results (after median sort)
float4 *h_Points_Result = (float4*) malloc( mem_size_Points );
// allocate host memory for CPU point results (after median sort)
float4 *h_Points_CPU = (float4*) malloc( mem_size_Points );
// Allocate host memory for singleton median index result
unsigned int mem_size_Result = 16 * sizeof(I32);
I32 *h_result_GPU = (I32 *) malloc( mem_size_Result );
h_result_GPU[0] = -1;
/*-----------------------
Device Memory
-----------------------*/
// allocate device memory for points
float4* d_Points;
checkCudaErrors( hipMalloc( (void**) &d_Points, mem_size_Points ) );
// allocate device memory for points
I32* d_result_GPU;
checkCudaErrors( hipMalloc( (void**) &d_result_GPU, mem_size_Result ) );
// allocate device memory for Reduction Vector
// Used for reduction
// IE Ping Pong between dists vector and reduce vector to get answer
// to get final answer
//bool bPingPong = true;
//float4* d_Reduce;
//checkCudaErrors( hipMalloc( (void **) &d_Reduce, mem_size_Points ) );
/*-------------------------------------------
Step 3. Initialize Vectors
-------------------------------------------*/
// Initialize Input points (to query against)
int i;
for (i = 0; i < nOrig; i++) // Original Points
{
// BUGBUG - for now just randomly generate points
// In future - we should read them in from a file...
h_Points_Orig[i].x = RandomFloat( 0.0, 1.0 );
h_Points_Orig[i].y = RandomFloat( 0.0, 1.0 );
h_Points_Orig[i].z = RandomFloat( 0.0, 1.0 );
// Store point index in this channel
h_Points_Orig[i].w = (float)i;
}
// Initialize padded points (to query against)
for (i = nOrig; i < nPad; i++) // Padded points
{
// We want padded points to always fail...
// 1st Approach,
  // Use a point that is so far away it is guaranteed to never get picked
// Cons: Requires advance knowledge of input point range
// and query point range to pick a point
// so far outside range it doesn't matter
// 2nd Approach,
// Duplicate the 1st point many times
// Cons: Can fail because of numerical round-off errors
// IE what if the 1st point is really the closest to the query point
// which point wins (1st point or one of it's duplicates)
//
// 1st Approach
//
h_Points[i].x = 400.0f; // Note: Any number much larger than 46,000 and we will overflow on squaring the float
h_Points[i].y = 400.0f;
h_Points[i].z = 400.0f;
h_Points[i].w = (float)-1; // Store invalid point index in this channel
//
// 2nd Approach
//
//h_Points[i].x = h_Points[0].x;
//h_Points[i].y = h_Points[0].y;
//h_Points[i].z = h_Points[0].z;
//h_Points[i].w = h_Points[0].w;
}
//
// Profile Performance Metric Initialization
//
float MED_PNT_onto_device = 0.0f;
float MED_PNT_from_device = 0.0f;
float MED_M_from_device = 0.0f;
float MED_GPU_Kernel = 0.0f;
float MED_CPU_Kernel = 0.0f;
bool checkMedianResults = true;
// Result values
int gpuMedianIdx; // Index of Median Point as computed on GPU
int cpuMedianIdx; // Index of Median Point as computed on CPU
// Profile Measurement Loop
unsigned int currIter;
for (currIter = 0; currIter < g_app.profileActualLoops; currIter++)
{
//-------------------------------------------------------
// Step 3. Move Points (& indices)
// from main memory to device memory
//-------------------------------------------------------
if (g_app.profile)
{
// Start Timer
sdkResetTimer( g_app.hTimer );
sdkStartTimer( g_app.hTimer );
}
// Copy 'Points' vector from host memory to device memory
checkCudaErrors( hipMemcpy( d_Points, h_Points_Orig, mem_size_Points, hipMemcpyHostToDevice ) );
// Copy 'Initial' result vector from host memory to device memory
checkCudaErrors( hipMemcpy( d_result_GPU, h_result_GPU, mem_size_Results, hipMemcpyHostToDevice ) );
if (g_app.profile)
{
// Stop Timer and save performance measurement
sdkStopTimer( g_app.hTimer );
if (g_app.profileSkipFirstLast)
{
if ((1 < currIter) && (currIter <= g_app.profileActualLoops))
{
MED_PNT_onto_device += sdkGetTimerValue( g_app.hTimer );
}
}
else
{
MED_PNT_onto_device += sdkGetTimerValue( g_app.hTimer );
}
}
//---------------------------------
// Step 4. Call Kernel Function
//---------------------------------
if (g_app.profile)
{
// Start Timer
sdkResetTimer( g_app.hTimer );
sdkStartTimer( g_app.hTimer );
}
  // Execute the median sort kernel
hipLaunchKernelGGL(( MedianSort_GPU), dim3(dimGrid), dim3(dimBlock) , 0, 0, d_Points, w, h );
// Check if GPU kernel execution generated an error
cuda_err = hipGetLastError();
if( hipSuccess != cuda_err)
{
fprintf( stderr, "Cuda error: %s in file '%s' in line %i : %s.\n",
"MedianSort_GPU() failed", __FILE__, __LINE__, hipGetErrorString( cuda_err ) );
exit( EXIT_FAILURE );
}
if (g_app.profile)
{
// Stop Timer and save performance measurement
sdkStopTimer( g_app.hTimer );
if (g_app.profileSkipFirstLast)
{
if ((1 < currIter) && (currIter <= g_app.profileActualLoops))
{
MED_GPU_Kernel += sdkGetTimerValue( g_app.hTimer );
}
}
else
{
MED_GPU_Kernel += sdkGetTimerValue( g_app.hTimer );
}
}
//-------------------------------------------------
// Step 5. Copy result vector (partitioned points)
// from device memory to main memory
//-------------------------------------------------
if (g_app.doubleCheckDists)
{
// BUGBUG - this is a temporary step to verify brute force distance calculation
if (g_app.profile)
{
// Start Timer
sdkResetTimer( g_app.hTimer );
sdkStartTimer( g_app.hTimer );
}
// copy result vector from device to host
checkCudaErrors( hipMemcpy( (void *) h_Points_Results, d_Points, mem_size_Points, hipMemcpyDeviceToHost ) );
// copy singleton median index from device to host
checkCudaErrors( hipMemcpy( (void *) h_results_GPU, d_results_GPU, mem_size_Results, hipMemcpyDeviceToHost ) );
if (g_app.profile)
{
// Stop Timer and save performance measurement
sdkStopTimer( g_app.hTimer );
if (g_app.profileSkipFirstLast)
{
if ((1 < currIter) && (currIter <= g_app.profileActualLoops))
{
MED_PNT_from_device += sdkGetTimerValue( g_app.hTimer );
}
}
else
{
MED_PNT_from_device += sdkGetTimerValue( g_app.hTimer );
}
}
}
/*-------------------------------------------------
Step 6. Double check GPU result
against CPU result
-------------------------------------------------*/
if (g_app.doubleCheckDists)
{
if (g_app.profile)
{
// Start Timer
sdkResetTimer( g_app.hTimer );
sdkStartTimer( g_app.hTimer );
}
// Compute reference solution (distances) on CPU
h_CPU_Idx = MedianSort_CPU( h_Points_CPU, h_Points_Orig, w, h );
if (g_app.profile)
{
// Stop Timer and save performance measurement
sdkStopTimer( g_app.hTimer );
if (g_app.profileSkipFirstLast)
{
if ((1 < currIter) && (currIter <= g_app.profileActualLoops))
{
MED_CPU_Kernel += sdkGetTimerValue( g_app.hTimer );
}
}
else
{
MED_CPU_Kernel += sdkGetTimerValue( g_app.hTimer );
}
}
// Double check GPU Result against CPU result (for distances)
int NCheck = nPad;
int i;
for (i = 0; i < NCheck; i++)
{
const float eps = 1.0e-2f;
//printf( "[%d] GPU=%f, CPU=%f \n", i, gVal, cVal );
if ( ((cVal - eps) >= gVal) ||
((cVal + eps) <= gVal) )
{
// Error - Out of tolerance check range
printf( "[%d] GPU %f != CPU %f \n", i, gVal, cVal );
checkDistResults = false;
}
}
} // double check distances
/*-------------------------------------------------
Step 7. GPU Kernel to reduce distances
(& index) vector
to single best result
-------------------------------------------------*/
if (g_app.profile)
{
// Start Timer
sdkResetTimer( g_app.hTimer );
sdkStartTimer( g_app.hTimer );
}
// Copy 'Distances' vector to 'Reduction' vector
// This is currently necessary to avoid garbage
// results in output caused by unitialized values
checkCudaErrors( hipMemcpy( d_Reduce, d_Dists, mem_size_Dists_GPU, hipMemcpyDeviceToDevice ) );
int reduceElems = nPad;
dim3 reduceThreads;
dim3 reduceGrid;
BlockGridShape reduceShape;
// Compute Initial Grid Shape
reduceShape.nElems = reduceElems;
reduceShape.threadsPerRow = BFMR_THREADS_PER_ROW;
reduceShape.rowsPerBlock = BFMR_ROWS_PER_BLOCK;
ComputeBlockShapeFromVector( reduceShape );
// Make sure we have an even number of blocks to work on
if ((reduceShape.blocksPerRow % 2) != 0)
{
// Error - not an even number of blocks
fprintf( stderr, "Error - not an even number of blocks\n" );
return false;
}
reduceThreads.x = reduceShape.threadsPerRow;
reduceThreads.y = reduceShape.rowsPerBlock;
reduceThreads.z = 1;
reduceGrid.x = reduceShape.blocksPerRow / 2; // Divide by 2 (algorithm works on 2 blocks at a time)
reduceGrid.y = reduceShape.rowsPerGrid;
reduceGrid.z = 1;
bool bReduced = false;
bPingPong = true;
while (!bReduced)
{
// Ping Pong between "Distances" and "Reduce" vectors
if (bPingPong)
{
bPingPong = false;
// Call GPU Kernel to reduce result vector by THREADS_PER_BLOCK
hipLaunchKernelGGL(( Reduce_Min_GPU), dim3(reduceGrid), dim3(reduceThreads) , 0, 0, d_Reduce, d_Dists );
}
else
{
bPingPong = true;
// Call GPU Kernel to reduce result vector by THREADS_PER_BLOCK
hipLaunchKernelGGL(( Reduce_Min_GPU), dim3(reduceGrid), dim3(reduceThreads) , 0, 0, d_Dists, d_Reduce );
}
// Check if GPU kernel execution generated an error
cuda_err = hipGetLastError();
if( hipSuccess != cuda_err)
{
fprintf( stderr, "Cuda error: %s in file '%s' in line %i : %s.\n",
"PLQ_GPU_BF_DIST() failed", __FILE__, __LINE__, hipGetErrorString( cuda_err ) );
exit( EXIT_FAILURE );
}
// Update Number of elements in reduction vector
reduceElems = reduceShape.blocksPerGrid / 2; // Divide by 2 - Algorithm works on 2 columns of blocks at a time
if (reduceElems == 1)
{
bReduced = true;
}
else
{
// Update Shape of Grid
reduceShape.nElems = reduceElems;
reduceShape.threadsPerRow = BFMR_THREADS_PER_ROW;
reduceShape.rowsPerBlock = BFMR_ROWS_PER_BLOCK;
ComputeBlockShapeFromVector( reduceShape );
// Make sure we have an even number of blocks to work on
if ((reduceShape.blocksPerRow % 2) != 0)
{
// Error - not even number of blocks
fprintf( stderr, "Error - not an even number of blocks" );
return false;
}
reduceThreads.x = reduceShape.threadsPerRow;
reduceThreads.y = reduceShape.rowsPerBlock;
reduceThreads.z = 1;
reduceGrid.x = reduceShape.blocksPerRow / 2; // Divide by 2 (algorithm works on 2 blocks at a time)
reduceGrid.y = reduceShape.rowsPerGrid;
reduceGrid.z = 1;
}
}
if (g_app.profile)
{
// Stop Timer and save performance measurement
sdkStopTimer( g_app.hTimer );
if (g_app.profileSkipFirstLast)
{
if ((1 < currIter) && (currIter <= g_app.profileActualLoops))
{
BF_GPU_min += sdkGetTimerValue( g_app.hTimer );
}
}
else
{
BF_GPU_min += sdkGetTimerValue( g_app.hTimer );
}
}
/*-------------------------------------------------
Step 8. Read Result from GPU
-------------------------------------------------*/
if (g_app.profile)
{
// Start Timer
sdkResetTimer( g_app.hTimer );
sdkStartTimer( g_app.hTimer );
}
// Copy closest point result from device to host memory (singleton distance & index)
if (!bPingPong)
{
cuda_err = hipMemcpy( h_result_GPU, d_Reduce, mem_size_Result, hipMemcpyDeviceToHost );
}
else
{
cuda_err = hipMemcpy( h_result_GPU, d_Dists, mem_size_Result, hipMemcpyDeviceToHost );
}
if (hipSuccess != cuda_err)
{
fprintf( stderr, "Cuda error in file '%s' in line %i : %s.\n",
__FILE__, __LINE__, hipGetErrorString( cuda_err ) );
exit( EXIT_FAILURE );
}
if (g_app.profile)
{
// Stop Timer and save performance measurement
sdkStopTimer( g_app.hTimer );
if (g_app.profileSkipFirstLast)
{
if ((1 < currIter) && (currIter <= g_app.profileActualLoops))
{
BF_M_from_device += sdkGetTimerValue( g_app.hTimer );
}
}
else
{
BF_M_from_device += sdkGetTimerValue( g_app.hTimer );
}
}
// Save Results
gpuMinDist = h_result_GPU[0].x;
gpuMinIdx = (unsigned int)(h_result_GPU[0].y);
/*-------------------------------------------------
Step 9. Double check GPU result
against CPU result
-------------------------------------------------*/
if (g_app.doubleCheckMin)
{
// BUGBUG - this is a temporary step to verify brute force distance calculation
if (g_app.profile)
{
// Start Timer
sdkResetTimer( g_app.hTimer );
sdkStartTimer( g_app.hTimer );
}
// Compute reference solution (distances) on CPU
Reduce_Min_CPU( cpuMinIdx, cpuMinDist, h_Points, queryPoint, nOrig );
if (g_app.profile)
{
// Stop Timer and save performance measurement
sdkStopTimer( g_app.hTimer );
if (g_app.profileSkipFirstLast)
{
if ((1 < currIter) && (currIter <= g_app.profileActualLoops))
{
BF_CPU_min += sdkGetTimerValue( g_app.hTimer );
}
}
else
{
BF_CPU_min += sdkGetTimerValue( g_app.hTimer );
}
}
//
// Double check GPU Result against CPU result
//
// Index check
if (gpuMinIdx != cpuMinIdx)
{
// Warning - Indices are not the same
// Note: This is not truly an error unless
// the closest distances also don't match
printf( "WARN - MIN GPU IDX %d != MIN CPU IDX %d \n", gpuMinIdx, cpuMinIdx );
}
// Distance Check
const float minEps = 1.0e-4f;
gVal = gpuMinDist;
cVal = cpuMinDist;
if ( ((cVal - minEps) >= gVal) ||
((cVal + minEps) <= gVal) )
{
// Error - Out of tolerance check range
printf( "ERR - MIN GPU DIST %f != MIN CPU DIST %f \n", i, gVal, cVal );
checkMinResults = false;
}
}
} // Profile Loops
/*--------------------------------------------------------
Step 11. Print out Results
--------------------------------------------------------*/
int vectLen = g_app.nSearch;
printf( "\n" );
printf( "Search Vector Length = %d\n", vectLen );
printf( "Query Point: <%f %f %f>\n",
queryPoint.x, queryPoint.y, queryPoint.z );
printf( "GPU Closest Distance: %f\n", gpuMinDist );
printf( "GPU Closest Index: %d\n", gpuMinIdx );
printf( "GPU Closest Point: <%f %f %f>\n",
h_Points[gpuMinIdx].x, h_Points[gpuMinIdx].y, h_Points[gpuMinIdx].z );
if (g_app.doubleCheckMin)
{
printf( "CPU Closest Distance: %f\n", cpuMinDist );
printf( "CPU Closest Index: %d\n", cpuMinIdx );
printf( "CPU Closest Point: <%f %f %f>\n",
h_Points[cpuMinIdx].x, h_Points[cpuMinIdx].y, h_Points[cpuMinIdx].z );
}
printf( "\n" );
/*--------------------------------------------------------
Step 12. Print out Profile Performance Metrics
--------------------------------------------------------*/
// Does GPU Distance Kernel match up with CPU ?!?
if (g_app.doubleCheckDists)
{
if (true == checkDistResults)
{
printf( "Distance check: CPU and GPU results agree within tolerance.\n" );
}
else
{
printf( "Distance check: CPU and GPU results don't agree within tolerance !!!\n" );
}
}
// Does GPU Min Distance Kernel match up with CPU ?!?
if (g_app.doubleCheckMin)
{
if (true == checkMinResults)
{
printf( "Min Distance check: CPU and GPU results agree within tolerance.\n" );
}
else
{
printf( "Min Distance check: CPU and GPU results don't agree within tolerance !!!\n" );
}
}
// Dump Profile Info
if (g_app.profile)
{
float loops = (float)g_app.profileActualLoops;
float o_l = 1.0f / loops;
float avgP = BF_P_onto_device * o_l;
float avgD = BF_D_from_device * o_l;
float avgM = BF_M_from_device * o_l;
float avgGPUdist = BF_GPU_dist * o_l;
float avgCPUdist = BF_CPU_dist * o_l;
float avgGPUmin = BF_GPU_min * o_l;
float avgCPUmin = BF_CPU_min * o_l;
// Verbose
printf( "Number of profile loops = %f.\n", loops );
printf( "BF - Copy 'Point' vector onto GPU, time: %f msecs.\n", avgP );
printf( "BF - Copy 'Dists' vector from GPU, time: %f msecs.\n", avgD );
printf( "BF - Copy 'Results' from GPU, time: %f msecs.\n", avgM );
printf( "BF - GPU Distance computation, time: %f msecs.\n", avgGPUdist );
printf( "BF - CPU Distance computation, time: %f msecs.\n", avgCPUdist );
printf( "BF - GPU Min Distance computation, time: %f msecs.\n", avgGPUmin );
printf( "BF - CPU Min Distance computation, time: %f msecs.\n\n", avgCPUmin );
// Terse
//printf( "BF - P, D, M, G_D, C_D, G_M, C_M\n" );
//printf( " %f, %f, %f, %f, %f, %f, %f\n\n", avgP, avgD, avgM, avgGPUdist, avgCPUdist, avgGPUmin, avgCPUmin );
}
else
{
printf( "BF - Copy 'Point' vector onto GPU, time: %f msecs.\n", BF_P_onto_device );
printf( "BF - Copy 'Dists' vector from GPU, time: %f msecs.\n", BF_D_from_device );
printf( "BF - Copy 'Results' from GPU, time: %f msecs.\n", BF_M_from_device );
printf( "BF - GPU Distance computation, time: %f msecs.\n", BF_GPU_dist );
printf( "BF - CPU Distance computation, time: %f msecs.\n", BF_CPU_dist );
printf( "BF - GPU Min Distance computation, time: %f msecs.\n", BF_GPU_min );
printf( "BF - CPU Min Distance computation, time: %f msecs.\n\n", BF_CPU_min );
}
/*---------------------------------
Step 13. Cleanup vector memory
---------------------------------*/
printf( "Shutting Down...\n" );
// clean up allocations
free( h_Points );
free( h_Dists_GPU );
free( h_Dists_CPU );
free( h_result_GPU );
sdkDeleteTimer( g_app.hTimer );
checkCudaErrors( hipFree( d_Points ) );
checkCudaErrors( hipFree( d_Dists ) );
checkCudaErrors( hipFree( d_Reduce ) );
printf( "Shutdown done...\n\n" );
#endif
// Success
return true;
}
/*---------------------------------------------------------
Name: ComputeBlockShapeFromVector
Desc:
Notes:
0. Assumes following members are initialized properly
before this funciton is called
shape.nElems = Number of original elements in vector
shape.tpr = threads per row
shape.rpb = rows per block
1. Block Limits
Thread block has at most 512 theads per block
2. Grid Limits
Grid has at most 65,535 blocks in any dimension
So a 1D grid is at most 65,535 x 1
and a 2D grid is at most 65,535 x 65,535
We use next smallest even number to these limits
IE 65,535 - 1
IE (65,535*65,535 - 1)
It's useful to have an even number of columns
in grid structure when doing reductions
---------------------------------------------------------*/
bool ComputeBlockShapeFromVector
(
BlockGridShape & bgShape // IN/OUT - bgShape
)
{
unsigned int remainder, extra;
//---------------------------------
// 1. Compute Threads Per Block
//---------------------------------
if ((bgShape.threadsPerRow > 512) || (bgShape.rowsPerBlock > 512))
{
// Error - block can't have more than 512 threads
printf( "block (%d TPR x %d RPB) > 512 TPB\n",
bgShape.threadsPerRow, bgShape.rowsPerBlock );
printf( "Error - can't have more than 512 threads in block" );
}
// Compute threads per block
bgShape.threadsPerBlock = bgShape.threadsPerRow * bgShape.rowsPerBlock;
// Make sure we don't exceed block limits
if (bgShape.threadsPerBlock > 512)
{
// Error - block can't have more than 512 threads
printf( "block (%d TPR x %d RPB) = %d TPB\n",
bgShape.threadsPerRow, bgShape.rowsPerBlock, bgShape.threadsPerBlock );
printf( "Error - can't have more than 512 threads in block" );
return false;
}
//---------------------------------
// 2. Compute GRID structure
//---------------------------------
// Compute number of blocks needed to contain all elements in vector
bgShape.blocksPerGrid = bgShape.nElems / bgShape.threadsPerBlock;
remainder = bgShape.nElems % bgShape.threadsPerBlock;
extra = ((0 == remainder) ? 0 : 1);
bgShape.blocksPerGrid += extra;
// Check if need a 1D Grid or 2D Grid structure
// to contain all blocks in grid
if (bgShape.blocksPerGrid <= 65534)
{
// We can use a simple 1D grid of blocks
bgShape.blocksPerRow = bgShape.blocksPerGrid;
bgShape.rowsPerGrid = 1;
}
else if (bgShape.blocksPerGrid <= 4294836224) // 4294836224 = (65535 * 65535 - 1)
{
// We need to use a 2D Grid structure instead...
// Use square root as an approximation for shape of 2D grid
float sq_r = sqrtf( (float)( bgShape.blocksPerGrid ) );
unsigned int uiSqrt = (unsigned int)sq_r;
bgShape.blocksPerRow = uiSqrt;
bgShape.rowsPerGrid = uiSqrt;
// Increment # of columns until we have enough space
// in grid layout for all elements in vector
while ((bgShape.blocksPerRow * bgShape.rowsPerGrid) < bgShape.blocksPerGrid)
{
bgShape.blocksPerRow++;
}
}
else
{
// Error - Vector is too large for 2D Grid
printf( "Vector is way too large...\n" );
return false;
}
// Make sure # of columns in 1D or 2D grid is even
// Which is useful to avoid special cases in reduction kernel
remainder = bgShape.blocksPerRow % 2;
extra = ((0 == remainder) ? 0 : 1);
bgShape.blocksPerRow += extra;
// Compute # of padded blocks in Grid
bgShape.blocksPerGrid = bgShape.blocksPerRow * bgShape.rowsPerGrid;
// Make sure we don't exceed grid limits
if ((bgShape.blocksPerRow >= 65535) || (bgShape.rowsPerGrid >= 65535))
{
// Error - Grid can't have more than 65535 blocks in any dimension
printf( "Grid (%d BPR x %d RPG) = %d BPG\n",
bgShape.blocksPerRow, bgShape.rowsPerGrid, bgShape.blocksPerGrid );
printf( "Error - can't have more than 65535 blocks in any dimension\n" );
return false;
}
// Compute Width and Height of 2D vector structure
bgShape.W = bgShape.threadsPerRow * bgShape.blocksPerRow; // Width (in elements) of 2D vector
bgShape.H = bgShape.rowsPerBlock * bgShape.rowsPerGrid; // Height (in elements) of 2D vector
// Compute padded length of 2D vector
unsigned int sizeWH = bgShape.W * bgShape.H;
unsigned int sizeBG = bgShape.threadsPerBlock * bgShape.blocksPerGrid;
if (sizeWH != sizeBG)
{
// Programmer error-
printf( "Error - sizes don't match\n" );
return false;
}
// Compute number of elements in padded block structure
bgShape.nPadded = sizeWH;
// Success
return true;
}
/*---------------------------------------------------------
Name: ComputeBlockShapeFromQueryVector
Desc:
Notes:
0. Assumes following members are initialized properly
before this function is called
shape.nElems = Number of original elements in query vector
shape.tpr = threads per row
shape.rpb = rows per block
1. Block Limits
Thread block has at most 512 theads per block
2. Grid Limits
Grid has at most 65,535 blocks in any dimension
So a 1D grid is at most 65,535 x 1
and a 2D grid is at most 65,535 x 65,535
We use next smallest even number to these limits
IE 65,535 - 1
IE (65,535*65,535 - 1)
---------------------------------------------------------*/
bool ComputeBlockShapeFromQueryVector
(
BlockGridShape & bgShape // IN/OUT - bgShape
)
{
unsigned int remainder, extra;
//---------------------------------
// 1. Compute Threads Per Block
//---------------------------------
if ((bgShape.threadsPerRow > 512) || (bgShape.rowsPerBlock > 512))
{
// Error - block can't have more than 512 threads
printf( "block (%d TPR x %d RPB) > 512 TPB\n",
bgShape.threadsPerRow, bgShape.rowsPerBlock );
printf( "Error - can't have more than 512 threads in block" );
}
// Compute threads per block
bgShape.threadsPerBlock = bgShape.threadsPerRow * bgShape.rowsPerBlock;
// Make sure we don't exceed block limits
if (bgShape.threadsPerBlock > 512)
{
// Error - block can't have more than 512 threads
printf( "block (%d TPR x %d RPB) = %d TPB\n",
bgShape.threadsPerRow, bgShape.rowsPerBlock, bgShape.threadsPerBlock );
printf( "Error - can't have more than 512 threads in block" );
return false;
}
//---------------------------------
// 2. Compute GRID structure
//---------------------------------
// Compute number of blocks needed to contain all elements in vector
bgShape.blocksPerGrid = bgShape.nElems / bgShape.threadsPerBlock;
remainder = bgShape.nElems % bgShape.threadsPerBlock;
extra = ((0 == remainder) ? 0 : 1);
bgShape.blocksPerGrid += extra;
// Check if need a 1D Grid or 2D Grid structure
// to contain all blocks in grid
if (bgShape.blocksPerGrid <= 65534)
{
// We can use a simple 1D grid of blocks
bgShape.blocksPerRow = bgShape.blocksPerGrid;
bgShape.rowsPerGrid = 1;
}
else if (bgShape.blocksPerGrid <= 4294836224) // 4294836224 = (65535 * 65535 - 1)
{
// We need to use a 2D Grid structure instead...
// Use square root as an approximation for shape of 2D grid
float sq_r = sqrtf( (float)( bgShape.blocksPerGrid ) );
unsigned int uiSqrt = (unsigned int)sq_r;
bgShape.blocksPerRow = uiSqrt;
bgShape.rowsPerGrid = uiSqrt;
// Increment # of columns until we have enough space
// in grid layout for all elements in vector
while ((bgShape.blocksPerRow * bgShape.rowsPerGrid) < bgShape.blocksPerGrid)
{
bgShape.blocksPerRow++;
}
}
else
{
// Error - Vector is too large for 2D Grid
printf( "Vector is way too large...\n" );
return false;
}
// Make sure we don't exceed grid limits
if ((bgShape.blocksPerRow >= 65535) || (bgShape.rowsPerGrid >= 65535))
{
// Error - Grid can't have more than 65535 blocks in any dimension
printf( "Grid (%d BPR x %d RPG) = %d BPG\n",
bgShape.blocksPerRow, bgShape.rowsPerGrid, bgShape.blocksPerGrid );
printf( "Error - can't have more than 65535 blocks in any dimension\n" );
return false;
}
// Compute # of padded blocks in Grid
bgShape.blocksPerGrid = bgShape.blocksPerRow * bgShape.rowsPerGrid;
// Compute Width and Height of 2D vector structure
bgShape.W = bgShape.threadsPerRow * bgShape.blocksPerRow; // Width (in elements) of 2D vector
bgShape.H = bgShape.rowsPerBlock * bgShape.rowsPerGrid; // Height (in elements) of 2D vector
// Compute padded length of 2D vector
unsigned int sizeWH = bgShape.W * bgShape.H;
unsigned int sizeBG = bgShape.threadsPerBlock * bgShape.blocksPerGrid;
if (sizeWH != sizeBG)
{
// Programmer error-
printf( "Error - sizes don't match\n" );
return false;
}
// Compute number of elements in padded block structure
bgShape.nPadded = sizeWH;
// Success
return true;
}
/*---------------------------------------------------------
Name: RandomFloat
Desc: Generates a random float value in range [low,high]
---------------------------------------------------------*/
float RandomFloat( float low, float high )
{
float t = (float)rand() / (float)RAND_MAX;
return (1.0f - t) * low + t * high;
}
/*---------------------------------------------------------
Name: InitCUDA
Desc: Initialize CUDA system for GPU processing
---------------------------------------------------------*/
// Runtime API version...
bool InitCUDA( AppGlobals & g )
{
bool bResult = false;
int nDevices = 0;
int deviceToUse = 0;
unsigned int cudaContextFlags = 0;
hipError_t cudaResult = hipSuccess;
#if (CUDA_PLATFORM == CUDA_DEVICE)
hipError_t cuda_Result = hipSuccess;
// Initialize CUDA
unsigned int cudaFlags = 0;
cuda_Result = hipInit( cudaFlags );
if (hipSuccess != cuda_Result)
{
// Error - hipGetDeviceCount() failed
fprintf( stderr, "InitCuda() - hipInit() failed, error = %x in file '%s' in line %i.\n",
cuda_Result, __FILE__, __LINE__ );
goto lblError;
}
// Get count of CUDA Devices
cuda_Result = hipGetDeviceCount(&nDevices);
if (hipSuccess != cuda_Result)
{
// Error - hipGetDeviceCount() failed
fprintf( stderr, "InitCuda() - hipGetDeviceCount() failed, error = %x in file '%s' in line %i.\n",
cuda_Result, __FILE__, __LINE__ );
goto lblError;
}
if (nDevices <= 0)
{
// No Valid Display Device found
cuda_Result = hipErrorInvalidDevice;
fprintf( stderr, "InitCuda() - no valid display device found, error = %x in file '%s' in line %i.\n",
cuda_Result, __FILE__, __LINE__ );
goto lblError;
}
else if (nDevices >= 2)
{
deviceToUse = 1;
}
// Get Specified Device
cuda_Result = hipDeviceGet( &(g.currDevice), deviceToUse );
  if (hipSuccess != cuda_Result) // check the result of hipDeviceGet, not the stale runtime status
{
// Error - cudaDeviceGet() failed
fprintf( stderr, "InitCuda() - hipDeviceGet() failed, error = %x in file '%s' in line %i.\n",
cuda_Result, __FILE__, __LINE__ );
goto lblError;
}
// Get RAW Device Properties
cuda_Result = hipGetDeviceProperties( &(g.rawProps), g.currDevice );
if (hipSuccess != cuda_Result )
{
// Error - cudaDeviceGet() failed
fprintf( stderr, "InitCuda() - hipGetDeviceProperties() failed, error = %x in file '%s' in line %i.\n",
cuda_Result, __FILE__, __LINE__ );
goto lblError;
}
// Set up the CUDA context
cuda_Result = hipCtxCreate( &g.currContext, cudaContextFlags, g.currDevice );
if ( hipSuccess != cuda_Result )
{
// Error - cudaDeviceGet() failed
fprintf( stderr, "InitCuda() - hipCtxCreate() failed, error = %x in file '%s' in line %i.\n",
cuda_Result, __FILE__, __LINE__ );
goto lblError;
}
// Get CUDA Display Device Properties
cudaResult = hipGetDeviceProperties( &(g.cudaProps) , deviceToUse );
if ( hipSuccess != cudaResult )
{
// Error - cudaDeviceGet() failed
fprintf( stderr, "InitCuda() - hipGetDeviceProperties() failed, error = %x in file '%s' in line %i.\n",
cudaResult, __FILE__, __LINE__ );
goto lblError;
}
#elif (CUDA_PLATFORM == CUDA_CUDA)
// Pick Display Device to perform GPU calculations on...
cudaResult = hipGetDeviceCount( &nDevices );
if ( hipSuccess != cudaResult )
{
// Error - hipGetDeviceCount() failed
fprintf( stderr, "InitCuda() - hipGetDeviceCount() failed, error = %x in file '%s' in line %i.\n",
cudaResult, __FILE__, __LINE__ );
goto lblError;
}
// Note: Assumes Device 0 = primary display device
// Assumes Device 1 = work horse for CUDA
if (nDevices <= 0)
{
// No Valid Display Device found
cudaResult = hipErrorInvalidDevice;
fprintf( stderr, "InitCuda() - no valid display device found, error = %x in file '%s' in line %i.\n",
cudaResult, __FILE__, __LINE__ );
goto lblError;
}
else if (nDevices >= 2)
{
deviceToUse = 1;
}
// Get Display Device Properties
cudaResult = hipGetDeviceProperties( &(g.cudaProps) , deviceToUse );
if ( hipSuccess != cudaResult )
{
// Error - cudaDeviceGet() failed
fprintf( stderr, "InitCuda() - hipGetDeviceProperties() failed, error = %x in file '%s' in line %i.\n",
cudaResult, __FILE__, __LINE__ );
goto lblError;
}
// Setup Display Device
cudaResult = hipSetDevice( deviceToUse );
if ( hipSuccess != cudaResult )
{
// Error - cudaDeviceGet() failed
fprintf( stderr, "InitCuda() - hipSetDevice() failed, error = %x in file '%s' in line %i.\n",
cudaResult, __FILE__, __LINE__ );
goto lblError;
}
#endif // CUDA_CUDA
// Success
bResult = true;
lblError:
return bResult;
}
//---------------------------------------------------------
// Name: FiniCUDA
// Desc: Cleanup CUDA system
//---------------------------------------------------------
bool FiniCUDA()
{
#if (CUDA_PLATFORM == CUDA_DEVICE)
// Detach CUDA from current thread
hipError_t cuda_Result = hipSuccess;
cuda_Result = hipCtxDetach( g_app.currContext );
if (hipSuccess != cuda_Result)
{
// Error - hipCtxDetach() failed
fprintf( stderr, "FiniCUDA() - hipCtxDetach() failed, error = %x in file '%s' in line %i.\n",
cuda_Result, __FILE__, __LINE__ );
return false;
}
#endif
// Success
return true;
}
/*---------------------------------------------------------
Name: InitGlobals
Desc: Initialize Application Globals to Default
---------------------------------------------------------*/
bool InitGlobals( AppGlobals & g )
{
//
// Set Defaults
//
// Search Vectors
g.nSearch = 100;
g.searchList = NULL;
g.nQuery = 10;
g.queryList = NULL;
// Cuda Properties
size_t byteSize;
#if (CUDA_PLATFORM == CUDA_DEVICE)
g.currDevice = 0;
// Initialize cuda device props to zero
byteSize = sizeof( g.rawProps );
memset( &g.rawProps, 0, byteSize );
#endif
// Initialize cuda props to zero
byteSize = sizeof( g.cudaProps );
memset( &g.cudaProps, 0, byteSize );
// Init Block Grid Shape
InitShapeDefaults( g.bgShape );
// App Properties
g.nopromptOnExit = 0;
g.doubleCheckDists = 1;
// Profiling Info
g.hTimer = 0;
g.profile = 1;
g.profileSkipFirstLast = 0;
g.profileRequestedLoops = 1;
g.profileActualLoops = 1;
return true;
}
/*---------------------------------------------------------
Name: GetCommandLineParameters
Desc:
---------------------------------------------------------*/
bool GetCommandLineParams
(
int argc, // Count of Command Line Parameters
const char** argv, // List of Command Line Parameters
AppGlobals & g // Structure to store results in
)
{
int iVal;
// Prompt before exiting application ?!?
if (cutCheckCmdLineFlag( argc, argv, "noprompt") )
{
g.nopromptOnExit = true;
}
else
{
g.nopromptOnExit = false;
}
// Double Check Distances
if (cutCheckCmdLineFlag( argc, argv, "cdist") )
{
g.doubleCheckDists = true;
}
else
{
g.doubleCheckDists = false;
}
// Double Check Distances
if (cutCheckCmdLineFlag( argc, argv, "cmin") )
{
g.doubleCheckMin = true;
}
else
{
g.doubleCheckMin = false;
}
// Get # Threads Per Row (block shape)
if (cutGetCmdLineArgumenti( argc, argv, "TPR", &iVal ))
{
if (iVal < 1) { iVal = 1; }
g.bgShape.threadsPerRow = iVal;
}
// Get # Rows Per Block
if (cutGetCmdLineArgumenti( argc, argv, "RPB", &iVal ))
{
if (iVal < 1) { iVal = 1; }
g.bgShape.rowsPerBlock = iVal;
}
// Calculate Threads Per Block
g.bgShape.threadsPerBlock = g.bgShape.threadsPerRow * g.bgShape.rowsPerBlock;
if (g.bgShape.threadsPerBlock > 512)
{
// Error - Can't have more than 512 threads per block
printf( "Max Threads Per Block is 512!!!\n\n" );
return false;
}
// Get search Vector Length
if (cutGetCmdLineArgumenti( argc, argv, "N", &iVal ))
{
if (iVal < 1) { iVal = 10000; }
g.nSearch = (int)iVal;
}
// Get Query Vector Length
if (cutGetCmdLineArgumenti( argc, argv, "NQ", &iVal ))
{
if (iVal < 1) { iVal = 100; }
g.nQuery = (int)iVal;
}
// Should we do profiling (performance measurements) on application
if (cutCheckCmdLineFlag( argc, argv, "profile") )
{
g.profile = true;
}
else
{
g.profile = false;
}
if (g.profile)
{
// Get Skip First Last flag
if (cutCheckCmdLineFlag( argc, argv, "skip") )
{
g.profileSkipFirstLast = true;
}
else
{
g.profileSkipFirstLast = false;
}
// Get Number of Iterations for Profiling performance
if (cutGetCmdLineArgumenti( argc, argv, "profile", &iVal ))
{
if (iVal < 1) { iVal = 100; }
g.profileRequestedLoops = iVal;
if (g.profileSkipFirstLast)
{
g.profileActualLoops = g.profileRequestedLoops + 2;
}
else
{
g.profileActualLoops = g.profileRequestedLoops;
}
}
}
// Success
return true;
}
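/*
   Usage sketch (illustrative only -- the executable name is hypothetical and
   the exact flag prefix depends on the cutil version linked in; stock cutil
   accepts "--name" flags and "--name=value" arguments):

       ./kdtree_test --N=10000 --NQ=100 --TPR=64 --RPB=4 --profile=10 --skip --cdist --cmin --noprompt

   This would request a 10,000 point search set, 100 query points, a 64x4
   thread block shape, 10 timed loops with the first/last skipped, CPU double
   checks of both the distance and min-reduction results, and no exit prompt.
*/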
/*---------------------------------------------------------
Name: InitShapeDefaults
---------------------------------------------------------*/
void InitShapeDefaults( BlockGridShape & bgShape )
{
// Default Thread, Grid, Vector Properties
bgShape.nElems = 100;
bgShape.threadsPerRow = 1;
bgShape.rowsPerBlock = 1;
bgShape.threadsPerBlock = bgShape.threadsPerRow * bgShape.rowsPerBlock;
bgShape.blocksPerRow = 100;
bgShape.rowsPerGrid = 1;
bgShape.blocksPerGrid = bgShape.blocksPerRow * bgShape.rowsPerGrid;
bgShape.W = bgShape.threadsPerRow * bgShape.blocksPerRow;
bgShape.H = bgShape.rowsPerBlock * bgShape.rowsPerGrid;
bgShape.nPadded = bgShape.W * bgShape.H;
}
/*---------------------------------------------------------
Name: DumpBlockGridShape
---------------------------------------------------------*/
void DumpBlockGridShape( BlockGridShape & bgShape )
{
printf( "N = %d, NPadded = %d\n",
bgShape.nElems, bgShape.nPadded );
printf( "Block (%d TPR x %d RPB) = %d TPB\n",
bgShape.threadsPerRow, bgShape.rowsPerBlock,
bgShape.threadsPerBlock );
printf( "Grid (%d BPR x %d RPG) = %d BPG\n",
bgShape.blocksPerRow, bgShape.rowsPerGrid,
bgShape.blocksPerGrid );
printf( "W = %d, H = %d\n",
bgShape.W, bgShape.H );
}
| 228c9ea2fd07261ff4d6a8f9e2e163d0be0307da.cu | /*-----------------------------------------------------------------------------
File: Median_Test.cpp
Desc: Runs Median Test
-----------------------------------------------------------------------------*/
/*-------------------------------------
Includes
-------------------------------------*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, CUDA
#include <cutil_inline.h>
// includes, project
#include "KD_API.h"
/*-------------------------------------
Global Variables
-------------------------------------*/
extern AppGlobals g_app;
/*-------------------------------------
CUDA Kernels
-------------------------------------*/
//#include <Median_GPU.cu>
/*-------------------------------------
Function Declarations
-------------------------------------*/
bool RunMedianTest();
/*---------------------------------------------------------
Name: RunMedianTest()
Desc: Run a simple test of "Median Partition"
functionality on CUDA GPU framework
---------------------------------------------------------*/
bool RunMedianTest()
{
bool bResult = false;
#if 0
/*---------------------------------
Step 0. Initialize Cuda
---------------------------------*/
cudaError_t cuda_err = cudaSuccess;
// set seed for rand()
srand( 2009 );
g_app.hTimer = 0;
sdkCreateTimer( &(g_app.hTimer) );
/*-------------------------------------------
Step 1. Setup Initial parameters
-------------------------------------------*/
// Hard Coded for now...
g_app.bgShape.nElems = g_app.nSearch;
g_app.bgShape.threadsPerRow = MEDIAN_THREADS_PER_ROW;
g_app.bgShape.rowsPerBlock = MEDIAN_ROWS_PER_BLOCK;
bResult = ComputeBlockShapeFromVector( g_app.bgShape );
if (false == bResult)
{
// Error
return false;
}
// Make sure Matrix + vector is not so big that it uses up all device memory // 768 Meg on Display Card
int sizePoints = g_app.bgShape.nPadded * sizeof(float4);
int sizeDists = g_app.bgShape.nPadded * sizeof(float2);
int totalMem = sizePoints + (2*sizeDists);
// Make sure memory required to perform this operation doesn't exceed display device memory
if (totalMem >= g_app.cudaProps.totalGlobalMem)
{
// Error - not enough memory to perform operation
printf( "Matrix + Vector are too large for available device memory, running test will crash..." );
return false;
}
// Setup GPU Kernel execution parameters
// Median Sort Kernel
dim3 dimBlock( g_app.bgShape.threadsPerRow, g_app.bgShape.rowsPerBlock, 1 );
dim3 dimGrid( g_app.bgShape.blocksPerRow, g_app.bgShape.rowsPerGrid, 1 );
/*-------------------------------------------
Step 2. Allocate Vectors
-------------------------------------------*/
int nOrig = g_app.bgShape.nElems;
int nPad = g_app.bgShape.nPadded;
int w = g_app.bgShape.W;
int h = g_app.bgShape.H;
/*-----------------------
Host Memory
-----------------------*/
// allocate host memory for original points (before median sort)
int mem_size_Points = nPad * sizeof(float4);
float4* h_Points_Orig = (float4*) malloc( (size_t)mem_size_Points );
// allocate host memory for point results (after median sort)
float4 *h_Points_Result = (float4*) malloc( mem_size_Points );
// allocate host memory for CPU point results (after median sort)
float4 *h_Points_CPU = (float4*) malloc( mem_size_Points );
// Allocate host memory for singleton median index result
unsigned int mem_size_Result = 16 * sizeof(I32);
I32 *h_result_GPU = (I32 *) malloc( mem_size_Result );
h_result_GPU[0] = -1;
/*-----------------------
Device Memory
-----------------------*/
// allocate device memory for points
float4* d_Points;
checkCudaErrors( cudaMalloc( (void**) &d_Points, mem_size_Points ) );
// allocate device memory for points
I32* d_result_GPU;
checkCudaErrors( cudaMalloc( (void**) &d_result_GPU, mem_size_Result ) );
// allocate device memory for Reduction Vector
// Used for reduction
// IE Ping Pong between dists vector and reduce vector to get answer
// to get final answer
//bool bPingPong = true;
//float4* d_Reduce;
//checkCudaErrors( cudaMalloc( (void **) &d_Reduce, mem_size_Points ) );
/*-------------------------------------------
Step 3. Initialize Vectors
-------------------------------------------*/
// Initialize Input points (to query against)
int i;
for (i = 0; i < nOrig; i++) // Original Points
{
// BUGBUG - for now just randomly generate points
// In future - we should read them in from a file...
h_Points_Orig[i].x = RandomFloat( 0.0, 1.0 );
h_Points_Orig[i].y = RandomFloat( 0.0, 1.0 );
h_Points_Orig[i].z = RandomFloat( 0.0, 1.0 );
// Store point index in this channel
h_Points_Orig[i].w = (float)i;
}
// Initialize padded points (to query against)
for (i = nOrig; i < nPad; i++) // Padded points
{
// We want padded points to always fail...
// 1st Approach,
// Use a point that is so far away it is guaranteed to never get picked
// Cons: Requires advance knowledge of input point range
// and query point range to pick a point
// so far outside range it doesn't matter
// 2nd Approach,
// Duplicate the 1st point many times
// Cons: Can fail because of numerical round-off errors
// IE what if the 1st point is really the closest to the query point
// which point wins (1st point or one of its duplicates)
//
// 1st Approach
//
h_Points_Orig[i].x = 400.0f; // Note: Any number much larger than 46,000 and we will overflow on squaring the float
h_Points_Orig[i].y = 400.0f;
h_Points_Orig[i].z = 400.0f;
h_Points_Orig[i].w = (float)-1; // Store invalid point index in this channel
//
// 2nd Approach
//
//h_Points[i].x = h_Points[0].x;
//h_Points[i].y = h_Points[0].y;
//h_Points[i].z = h_Points[0].z;
//h_Points[i].w = h_Points[0].w;
}
//
// Profile Performance Metric Initialization
//
float MED_PNT_onto_device = 0.0f;
float MED_PNT_from_device = 0.0f;
float MED_M_from_device = 0.0f;
float MED_GPU_Kernel = 0.0f;
float MED_CPU_Kernel = 0.0f;
bool checkMedianResults = true;
// Result values
int gpuMedianIdx; // Index of Median Point as computed on GPU
int cpuMedianIdx; // Index of Median Point as computed on CPU
// Profile Measurement Loop
unsigned int currIter;
for (currIter = 0; currIter < g_app.profileActualLoops; currIter++)
{
//-------------------------------------------------------
// Step 3. Move Points (& indices)
// from main memory to device memory
//-------------------------------------------------------
if (g_app.profile)
{
// Start Timer
sdkResetTimer( g_app.hTimer );
sdkStartTimer( g_app.hTimer );
}
// Copy 'Points' vector from host memory to device memory
checkCudaErrors( cudaMemcpy( d_Points, h_Points_Orig, mem_size_Points, cudaMemcpyHostToDevice ) );
// Copy 'Initial' result vector from host memory to device memory
checkCudaErrors( cudaMemcpy( d_result_GPU, h_result_GPU, mem_size_Result, cudaMemcpyHostToDevice ) );
if (g_app.profile)
{
// Stop Timer and save performance measurement
sdkStopTimer( g_app.hTimer );
if (g_app.profileSkipFirstLast)
{
if ((1 < currIter) && (currIter <= g_app.profileActualLoops))
{
MED_PNT_onto_device += sdkGetTimerValue( g_app.hTimer );
}
}
else
{
MED_PNT_onto_device += sdkGetTimerValue( g_app.hTimer );
}
}
//---------------------------------
// Step 4. Call Kernel Function
//---------------------------------
if (g_app.profile)
{
// Start Timer
sdkResetTimer( g_app.hTimer );
sdkStartTimer( g_app.hTimer );
}
// Excute the Brute Force Distance Kernel
MedianSort_GPU<<< dimGrid, dimBlock >>>( d_Points, w, h );
// Check if GPU kernel execution generated an error
cuda_err = cudaGetLastError();
if( cudaSuccess != cuda_err)
{
fprintf( stderr, "Cuda error: %s in file '%s' in line %i : %s.\n",
"MedianSort_GPU() failed", __FILE__, __LINE__, cudaGetErrorString( cuda_err ) );
exit( EXIT_FAILURE );
}
if (g_app.profile)
{
// Stop Timer and save performance measurement
sdkStopTimer( g_app.hTimer );
if (g_app.profileSkipFirstLast)
{
if ((1 < currIter) && (currIter <= g_app.profileActualLoops))
{
MED_GPU_Kernel += sdkGetTimerValue( g_app.hTimer );
}
}
else
{
MED_GPU_Kernel += sdkGetTimerValue( g_app.hTimer );
}
}
//-------------------------------------------------
// Step 5. Copy result vector (partitioned points)
// from device memory to main memory
//-------------------------------------------------
if (g_app.doubleCheckDists)
{
// BUGBUG - this is a temporary step to verify brute force distance calculation
if (g_app.profile)
{
// Start Timer
sdkResetTimer( g_app.hTimer );
sdkStartTimer( g_app.hTimer );
}
// copy result vector from device to host
checkCudaErrors( cudaMemcpy( (void *) h_Points_Result, d_Points, mem_size_Points, cudaMemcpyDeviceToHost ) );
// copy singleton median index from device to host
checkCudaErrors( cudaMemcpy( (void *) h_result_GPU, d_result_GPU, mem_size_Result, cudaMemcpyDeviceToHost ) );
if (g_app.profile)
{
// Stop Timer and save performance measurement
sdkStopTimer( g_app.hTimer );
if (g_app.profileSkipFirstLast)
{
if ((1 < currIter) && (currIter <= g_app.profileActualLoops))
{
MED_PNT_from_device += sdkGetTimerValue( g_app.hTimer );
}
}
else
{
MED_PNT_from_device += sdkGetTimerValue( g_app.hTimer );
}
}
}
/*-------------------------------------------------
Step 6. Double check GPU result
against CPU result
-------------------------------------------------*/
if (g_app.doubleCheckDists)
{
if (g_app.profile)
{
// Start Timer
sdkResetTimer( g_app.hTimer );
sdkStartTimer( g_app.hTimer );
}
// Compute reference solution (distances) on CPU
cpuMedianIdx = MedianSort_CPU( h_Points_CPU, h_Points_Orig, w, h );
if (g_app.profile)
{
// Stop Timer and save performance measurement
sdkStopTimer( g_app.hTimer );
if (g_app.profileSkipFirstLast)
{
if ((1 < currIter) && (currIter <= g_app.profileActualLoops))
{
MED_CPU_Kernel += sdkGetTimerValue( g_app.hTimer );
}
}
else
{
MED_CPU_Kernel += sdkGetTimerValue( g_app.hTimer );
}
}
// Double check GPU Result against CPU result (for distances)
int NCheck = nPad;
int i;
for (i = 0; i < NCheck; i++)
{
const float eps = 1.0e-2f;
//printf( "[%d] GPU=%f, CPU=%f \n", i, gVal, cVal );
if ( ((cVal - eps) >= gVal) ||
((cVal + eps) <= gVal) )
{
// Error - Out of tolerance check range
printf( "[%d] GPU %f != CPU %f \n", i, gVal, cVal );
checkDistResults = false;
}
}
} // double check distances
/*-------------------------------------------------
Step 7. GPU Kernel to reduce distances
(& index) vector
to single best result
-------------------------------------------------*/
if (g_app.profile)
{
// Start Timer
sdkResetTimer( g_app.hTimer );
sdkStartTimer( g_app.hTimer );
}
// Copy 'Distances' vector to 'Reduction' vector
// This is currently necessary to avoid garbage
// results in output caused by uninitialized values
checkCudaErrors( cudaMemcpy( d_Reduce, d_Dists, mem_size_Dists_GPU, cudaMemcpyDeviceToDevice ) );
int reduceElems = nPad;
dim3 reduceThreads;
dim3 reduceGrid;
BlockGridShape reduceShape;
// Compute Initial Grid Shape
reduceShape.nElems = reduceElems;
reduceShape.threadsPerRow = BFMR_THREADS_PER_ROW;
reduceShape.rowsPerBlock = BFMR_ROWS_PER_BLOCK;
ComputeBlockShapeFromVector( reduceShape );
// Make sure we have an even number of blocks to work on
if ((reduceShape.blocksPerRow % 2) != 0)
{
// Error - not an even number of blocks
fprintf( stderr, "Error - not an even number of blocks\n" );
return false;
}
reduceThreads.x = reduceShape.threadsPerRow;
reduceThreads.y = reduceShape.rowsPerBlock;
reduceThreads.z = 1;
reduceGrid.x = reduceShape.blocksPerRow / 2; // Divide by 2 (algorithm works on 2 blocks at a time)
reduceGrid.y = reduceShape.rowsPerGrid;
reduceGrid.z = 1;
bool bReduced = false;
bPingPong = true;
while (!bReduced)
{
// Ping Pong between "Distances" and "Reduce" vectors
if (bPingPong)
{
bPingPong = false;
// Call GPU Kernel to reduce result vector by THREADS_PER_BLOCK
Reduce_Min_GPU<<< reduceGrid, reduceThreads >>>( d_Reduce, d_Dists );
}
else
{
bPingPong = true;
// Call GPU Kernel to reduce result vector by THREADS_PER_BLOCK
Reduce_Min_GPU<<< reduceGrid, reduceThreads >>>( d_Dists, d_Reduce );
}
// Check if GPU kernel execution generated an error
cuda_err = cudaGetLastError();
if( cudaSuccess != cuda_err)
{
fprintf( stderr, "Cuda error: %s in file '%s' in line %i : %s.\n",
"PLQ_GPU_BF_DIST() failed", __FILE__, __LINE__, cudaGetErrorString( cuda_err ) );
exit( EXIT_FAILURE );
}
// Update Number of elements in reduction vector
reduceElems = reduceShape.blocksPerGrid / 2; // Divide by 2 - Algorithm works on 2 columns of blocks at a time
if (reduceElems == 1)
{
bReduced = true;
}
else
{
// Update Shape of Grid
reduceShape.nElems = reduceElems;
reduceShape.threadsPerRow = BFMR_THREADS_PER_ROW;
reduceShape.rowsPerBlock = BFMR_ROWS_PER_BLOCK;
ComputeBlockShapeFromVector( reduceShape );
// Make sure we have an even number of blocks to work on
if ((reduceShape.blocksPerRow % 2) != 0)
{
// Error - not even number of blocks
fprintf( stderr, "Error - not an even number of blocks" );
return false;
}
reduceThreads.x = reduceShape.threadsPerRow;
reduceThreads.y = reduceShape.rowsPerBlock;
reduceThreads.z = 1;
reduceGrid.x = reduceShape.blocksPerRow / 2; // Divide by 2 (algorithm works on 2 blocks at a time)
reduceGrid.y = reduceShape.rowsPerGrid;
reduceGrid.z = 1;
}
}
if (g_app.profile)
{
// Stop Timer and save performance measurement
sdkStopTimer( g_app.hTimer );
if (g_app.profileSkipFirstLast)
{
if ((1 < currIter) && (currIter <= g_app.profileActualLoops))
{
BF_GPU_min += sdkGetTimerValue( g_app.hTimer );
}
}
else
{
BF_GPU_min += sdkGetTimerValue( g_app.hTimer );
}
}
/*-------------------------------------------------
Step 8. Read Result from GPU
-------------------------------------------------*/
if (g_app.profile)
{
// Start Timer
sdkResetTimer( g_app.hTimer );
sdkStartTimer( g_app.hTimer );
}
// Copy closest point result from device to host memory (singleton distance & index)
if (!bPingPong)
{
cuda_err = cudaMemcpy( h_result_GPU, d_Reduce, mem_size_Result, cudaMemcpyDeviceToHost );
}
else
{
cuda_err = cudaMemcpy( h_result_GPU, d_Dists, mem_size_Result, cudaMemcpyDeviceToHost );
}
if (cudaSuccess != cuda_err)
{
fprintf( stderr, "Cuda error in file '%s' in line %i : %s.\n",
__FILE__, __LINE__, cudaGetErrorString( cuda_err ) );
exit( EXIT_FAILURE );
}
if (g_app.profile)
{
// Stop Timer and save performance measurement
sdkStopTimer( g_app.hTimer );
if (g_app.profileSkipFirstLast)
{
if ((1 < currIter) && (currIter <= g_app.profileActualLoops))
{
BF_M_from_device += sdkGetTimerValue( g_app.hTimer );
}
}
else
{
BF_M_from_device += sdkGetTimerValue( g_app.hTimer );
}
}
// Save Results
gpuMinDist = h_result_GPU[0].x;
gpuMinIdx = (unsigned int)(h_result_GPU[0].y);
/*-------------------------------------------------
Step 9. Double check GPU result
against CPU result
-------------------------------------------------*/
if (g_app.doubleCheckMin)
{
// BUGBUG - this is a temporary step to verify brute force distance calculation
if (g_app.profile)
{
// Start Timer
sdkResetTimer( g_app.hTimer );
sdkStartTimer( g_app.hTimer );
}
// Compute reference solution (distances) on CPU
Reduce_Min_CPU( cpuMinIdx, cpuMinDist, h_Points, queryPoint, nOrig );
if (g_app.profile)
{
// Stop Timer and save performance measurement
sdkStopTimer( g_app.hTimer );
if (g_app.profileSkipFirstLast)
{
if ((1 < currIter) && (currIter <= g_app.profileActualLoops))
{
BF_CPU_min += sdkGetTimerValue( g_app.hTimer );
}
}
else
{
BF_CPU_min += sdkGetTimerValue( g_app.hTimer );
}
}
//
// Double check GPU Result against CPU result
//
// Index check
if (gpuMinIdx != cpuMinIdx)
{
// Warning - Indices are not the same
// Note: This is not truly an error unless
// the closest distances also don't match
printf( "WARN - MIN GPU IDX %d != MIN CPU IDX %d \n", gpuMinIdx, cpuMinIdx );
}
// Distance Check
const float minEps = 1.0e-4f;
gVal = gpuMinDist;
cVal = cpuMinDist;
if ( ((cVal - minEps) >= gVal) ||
((cVal + minEps) <= gVal) )
{
// Error - Out of tolerance check range
printf( "ERR - MIN GPU DIST %f != MIN CPU DIST %f \n", i, gVal, cVal );
checkMinResults = false;
}
}
} // Profile Loops
/*--------------------------------------------------------
Step 11. Print out Results
--------------------------------------------------------*/
int vectLen = g_app.nSearch;
printf( "\n" );
printf( "Search Vector Length = %d\n", vectLen );
printf( "Query Point: <%f %f %f>\n",
queryPoint.x, queryPoint.y, queryPoint.z );
printf( "GPU Closest Distance: %f\n", gpuMinDist );
printf( "GPU Closest Index: %d\n", gpuMinIdx );
printf( "GPU Closest Point: <%f %f %f>\n",
h_Points[gpuMinIdx].x, h_Points[gpuMinIdx].y, h_Points[gpuMinIdx].z );
if (g_app.doubleCheckMin)
{
printf( "CPU Closest Distance: %f\n", cpuMinDist );
printf( "CPU Closest Index: %d\n", cpuMinIdx );
printf( "CPU Closest Point: <%f %f %f>\n",
h_Points[cpuMinIdx].x, h_Points[cpuMinIdx].y, h_Points[cpuMinIdx].z );
}
printf( "\n" );
/*--------------------------------------------------------
Step 12. Print out Profile Performance Metrics
--------------------------------------------------------*/
// Does GPU Distance Kernel match up with CPU ?!?
if (g_app.doubleCheckDists)
{
if (true == checkDistResults)
{
printf( "Distance check: CPU and GPU results agree within tolerance.\n" );
}
else
{
printf( "Distance check: CPU and GPU results don't agree within tolerance !!!\n" );
}
}
// Does GPU Min Distance Kernel match up with CPU ?!?
if (g_app.doubleCheckMin)
{
if (true == checkMinResults)
{
printf( "Min Distance check: CPU and GPU results agree within tolerance.\n" );
}
else
{
printf( "Min Distance check: CPU and GPU results don't agree within tolerance !!!\n" );
}
}
// Dump Profile Info
if (g_app.profile)
{
float loops = (float)g_app.profileActualLoops;
float o_l = 1.0f / loops;
float avgP = BF_P_onto_device * o_l;
float avgD = BF_D_from_device * o_l;
float avgM = BF_M_from_device * o_l;
float avgGPUdist = BF_GPU_dist * o_l;
float avgCPUdist = BF_CPU_dist * o_l;
float avgGPUmin = BF_GPU_min * o_l;
float avgCPUmin = BF_CPU_min * o_l;
// Verbose
printf( "Number of profile loops = %f.\n", loops );
printf( "BF - Copy 'Point' vector onto GPU, time: %f msecs.\n", avgP );
printf( "BF - Copy 'Dists' vector from GPU, time: %f msecs.\n", avgD );
printf( "BF - Copy 'Results' from GPU, time: %f msecs.\n", avgM );
printf( "BF - GPU Distance computation, time: %f msecs.\n", avgGPUdist );
printf( "BF - CPU Distance computation, time: %f msecs.\n", avgCPUdist );
printf( "BF - GPU Min Distance computation, time: %f msecs.\n", avgGPUmin );
printf( "BF - CPU Min Distance computation, time: %f msecs.\n\n", avgCPUmin );
// Terse
//printf( "BF - P, D, M, G_D, C_D, G_M, C_M\n" );
//printf( " %f, %f, %f, %f, %f, %f, %f\n\n", avgP, avgD, avgM, avgGPUdist, avgCPUdist, avgGPUmin, avgCPUmin );
}
else
{
printf( "BF - Copy 'Point' vector onto GPU, time: %f msecs.\n", BF_P_onto_device );
printf( "BF - Copy 'Dists' vector from GPU, time: %f msecs.\n", BF_D_from_device );
printf( "BF - Copy 'Results' from GPU, time: %f msecs.\n", BF_M_from_device );
printf( "BF - GPU Distance computation, time: %f msecs.\n", BF_GPU_dist );
printf( "BF - CPU Distance computation, time: %f msecs.\n", BF_CPU_dist );
printf( "BF - GPU Min Distance computation, time: %f msecs.\n", BF_GPU_min );
printf( "BF - CPU Min Distance computation, time: %f msecs.\n\n", BF_CPU_min );
}
/*---------------------------------
Step 13. Cleanup vector memory
---------------------------------*/
printf( "Shutting Down...\n" );
// clean up allocations
free( h_Points );
free( h_Dists_GPU );
free( h_Dists_CPU );
free( h_result_GPU );
sdkDeleteTimer( g_app.hTimer );
checkCudaErrors( cudaFree( d_Points ) );
checkCudaErrors( cudaFree( d_Dists ) );
checkCudaErrors( cudaFree( d_Reduce ) );
printf( "Shutdown done...\n\n" );
#endif
// Success
return true;
}
/*---------------------------------------------------------
Name: ComputeBlockShapeFromVector
Desc:
Notes:
0. Assumes following members are initialized properly
before this function is called
shape.nElems = Number of original elements in vector
shape.tpr = threads per row
shape.rpb = rows per block
1. Block Limits
Thread block has at most 512 threads per block
2. Grid Limits
Grid has at most 65,535 blocks in any dimension
So a 1D grid is at most 65,535 x 1
and a 2D grid is at most 65,535 x 65,535
We use next smallest even number to these limits
IE 65,535 - 1
IE (65,535*65,535 - 1)
It's useful to have an even number of columns
in grid structure when doing reductions
---------------------------------------------------------*/
bool ComputeBlockShapeFromVector
(
BlockGridShape & bgShape // IN/OUT - bgShape
)
{
unsigned int remainder, extra;
//---------------------------------
// 1. Compute Threads Per Block
//---------------------------------
if ((bgShape.threadsPerRow > 512) || (bgShape.rowsPerBlock > 512))
{
// Error - block can't have more than 512 threads
printf( "block (%d TPR x %d RPB) > 512 TPB\n",
bgShape.threadsPerRow, bgShape.rowsPerBlock );
printf( "Error - can't have more than 512 threads in block" );
}
// Compute threads per block
bgShape.threadsPerBlock = bgShape.threadsPerRow * bgShape.rowsPerBlock;
// Make sure we don't exceed block limits
if (bgShape.threadsPerBlock > 512)
{
// Error - block can't have more than 512 threads
printf( "block (%d TPR x %d RPB) = %d TPB\n",
bgShape.threadsPerRow, bgShape.rowsPerBlock, bgShape.threadsPerBlock );
printf( "Error - can't have more than 512 threads in block" );
return false;
}
//---------------------------------
// 2. Compute GRID structure
//---------------------------------
// Compute number of blocks needed to contain all elements in vector
bgShape.blocksPerGrid = bgShape.nElems / bgShape.threadsPerBlock;
remainder = bgShape.nElems % bgShape.threadsPerBlock;
extra = ((0 == remainder) ? 0 : 1);
bgShape.blocksPerGrid += extra;
// Check if need a 1D Grid or 2D Grid structure
// to contain all blocks in grid
if (bgShape.blocksPerGrid <= 65534)
{
// We can use a simple 1D grid of blocks
bgShape.blocksPerRow = bgShape.blocksPerGrid;
bgShape.rowsPerGrid = 1;
}
else if (bgShape.blocksPerGrid <= 4294836224) // 4294836224 = (65535 * 65535 - 1)
{
// We need to use a 2D Grid structure instead...
// Use square root as an approximation for shape of 2D grid
float sq_r = sqrtf( (float)( bgShape.blocksPerGrid ) );
unsigned int uiSqrt = (unsigned int)sq_r;
bgShape.blocksPerRow = uiSqrt;
bgShape.rowsPerGrid = uiSqrt;
// Increment # of columns until we have enough space
// in grid layout for all elements in vector
while ((bgShape.blocksPerRow * bgShape.rowsPerGrid) < bgShape.blocksPerGrid)
{
bgShape.blocksPerRow++;
}
}
else
{
// Error - Vector is too large for 2D Grid
printf( "Vector is way too large...\n" );
return false;
}
// Make sure # of columns in 1D or 2D grid is even
// Which is useful to avoid special cases in reduction kernel
remainder = bgShape.blocksPerRow % 2;
extra = ((0 == remainder) ? 0 : 1);
bgShape.blocksPerRow += extra;
// Compute # of padded blocks in Grid
bgShape.blocksPerGrid = bgShape.blocksPerRow * bgShape.rowsPerGrid;
// Make sure we don't exceed grid limits
if ((bgShape.blocksPerRow >= 65535) || (bgShape.rowsPerGrid >= 65535))
{
// Error - Grid can't have more than 65535 blocks in any dimension
printf( "Grid (%d BPR x %d RPG) = %d BPG\n",
bgShape.blocksPerRow, bgShape.rowsPerGrid, bgShape.blocksPerGrid );
printf( "Error - can't have more than 65535 blocks in any dimension\n" );
return false;
}
// Compute Width and Height of 2D vector structure
bgShape.W = bgShape.threadsPerRow * bgShape.blocksPerRow; // Width (in elements) of 2D vector
bgShape.H = bgShape.rowsPerBlock * bgShape.rowsPerGrid; // Height (in elements) of 2D vector
// Compute padded length of 2D vector
unsigned int sizeWH = bgShape.W * bgShape.H;
unsigned int sizeBG = bgShape.threadsPerBlock * bgShape.blocksPerGrid;
if (sizeWH != sizeBG)
{
// Programmer error-
printf( "Error - sizes don't match\n" );
return false;
}
// Compute number of elements in padded block structure
bgShape.nPadded = sizeWH;
// Success
return true;
}
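/*
   Worked example (illustrative numbers, not taken from the original code):
   nElems = 10000, threadsPerRow = 64, rowsPerBlock = 1
     threadsPerBlock = 64
     blocksPerGrid   = ceil(10000 / 64) = 157 -> fits a 1D grid (<= 65534)
     blocksPerRow    = 157, padded to 158 to keep an even column count
     blocksPerGrid   = 158, W = 64 * 158 = 10112, H = 1, nPadded = 10112
   so the padded 2D layout holds 10112 slots for the 10000 real elements.
*/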
/*---------------------------------------------------------
Name: ComputeBlockShapeFromQueryVector
Desc:
Notes:
0. Assumes following members are initialized properly
before this function is called
shape.nElems = Number of original elements in query vector
shape.tpr = threads per row
shape.rpb = rows per block
1. Block Limits
Thread block has at most 512 threads per block
2. Grid Limits
Grid has at most 65,535 blocks in any dimension
So a 1D grid is at most 65,535 x 1
and a 2D grid is at most 65,535 x 65,535
We use next smallest even number to these limits
IE 65,535 - 1
IE (65,535*65,535 - 1)
---------------------------------------------------------*/
bool ComputeBlockShapeFromQueryVector
(
BlockGridShape & bgShape // IN/OUT - bgShape
)
{
unsigned int remainder, extra;
//---------------------------------
// 1. Compute Threads Per Block
//---------------------------------
if ((bgShape.threadsPerRow > 512) || (bgShape.rowsPerBlock > 512))
{
// Error - block can't have more than 512 threads
printf( "block (%d TPR x %d RPB) > 512 TPB\n",
bgShape.threadsPerRow, bgShape.rowsPerBlock );
printf( "Error - can't have more than 512 threads in block" );
}
// Compute threads per block
bgShape.threadsPerBlock = bgShape.threadsPerRow * bgShape.rowsPerBlock;
// Make sure we don't exceed block limits
if (bgShape.threadsPerBlock > 512)
{
// Error - block can't have more than 512 threads
printf( "block (%d TPR x %d RPB) = %d TPB\n",
bgShape.threadsPerRow, bgShape.rowsPerBlock, bgShape.threadsPerBlock );
printf( "Error - can't have more than 512 threads in block" );
return false;
}
//---------------------------------
// 2. Compute GRID structure
//---------------------------------
// Compute number of blocks needed to contain all elements in vector
bgShape.blocksPerGrid = bgShape.nElems / bgShape.threadsPerBlock;
remainder = bgShape.nElems % bgShape.threadsPerBlock;
extra = ((0 == remainder) ? 0 : 1);
bgShape.blocksPerGrid += extra;
// Check if need a 1D Grid or 2D Grid structure
// to contain all blocks in grid
if (bgShape.blocksPerGrid <= 65534)
{
// We can use a simple 1D grid of blocks
bgShape.blocksPerRow = bgShape.blocksPerGrid;
bgShape.rowsPerGrid = 1;
}
else if (bgShape.blocksPerGrid <= 4294836224) // 4294836224 = (65535 * 65535 - 1)
{
// We need to use a 2D Grid structure instead...
// Use square root as an approximation for shape of 2D grid
float sq_r = sqrtf( (float)( bgShape.blocksPerGrid ) );
unsigned int uiSqrt = (unsigned int)sq_r;
bgShape.blocksPerRow = uiSqrt;
bgShape.rowsPerGrid = uiSqrt;
// Increment # of columns until we have enough space
// in grid layout for all elements in vector
while ((bgShape.blocksPerRow * bgShape.rowsPerGrid) < bgShape.blocksPerGrid)
{
bgShape.blocksPerRow++;
}
}
else
{
// Error - Vector is too large for 2D Grid
printf( "Vector is way too large...\n" );
return false;
}
// Make sure we don't exceed grid limits
if ((bgShape.blocksPerRow >= 65535) || (bgShape.rowsPerGrid >= 65535))
{
// Error - Grid can't have more than 65535 blocks in any dimension
printf( "Grid (%d BPR x %d RPG) = %d BPG\n",
bgShape.blocksPerRow, bgShape.rowsPerGrid, bgShape.blocksPerGrid );
printf( "Error - can't have more than 65535 blocks in any dimension\n" );
return false;
}
// Compute # of padded blocks in Grid
bgShape.blocksPerGrid = bgShape.blocksPerRow * bgShape.rowsPerGrid;
// Compute Width and Height of 2D vector structure
bgShape.W = bgShape.threadsPerRow * bgShape.blocksPerRow; // Width (in elements) of 2D vector
bgShape.H = bgShape.rowsPerBlock * bgShape.rowsPerGrid; // Height (in elements) of 2D vector
// Compute padded length of 2D vector
unsigned int sizeWH = bgShape.W * bgShape.H;
unsigned int sizeBG = bgShape.threadsPerBlock * bgShape.blocksPerGrid;
if (sizeWH != sizeBG)
{
// Programmer error-
printf( "Error - sizes don't match\n" );
return false;
}
// Compute number of elements in padded block structure
bgShape.nPadded = sizeWH;
// Success
return true;
}
/*---------------------------------------------------------
Name: RandomFloat
Desc: Generates a random float value in range [low,high]
---------------------------------------------------------*/
float RandomFloat( float low, float high )
{
float t = (float)rand() / (float)RAND_MAX;
return (1.0f - t) * low + t * high;
}
/*---------------------------------------------------------
Name: InitCUDA
Desc: Initialize CUDA system for GPU processing
---------------------------------------------------------*/
// Runtime API version...
bool InitCUDA( AppGlobals & g )
{
bool bResult = false;
int nDevices = 0;
int deviceToUse = 0;
unsigned int cudaContextFlags = 0;
cudaError_t cudaResult = cudaSuccess;
#if (CUDA_PLATFORM == CUDA_DEVICE)
CUresult cuda_Result = CUDA_SUCCESS;
// Initialize CUDA
unsigned int cudaFlags = 0;
cuda_Result = cuInit( cudaFlags );
if (CUDA_SUCCESS != cuda_Result)
{
// Error - cudaGetDeviceCount() failed
fprintf( stderr, "InitCuda() - cuInit() failed, error = %x in file '%s' in line %i.\n",
cuda_Result, __FILE__, __LINE__ );
goto lblError;
}
// Get count of CUDA Devices
cuda_Result = cuDeviceGetCount(&nDevices);
if (CUDA_SUCCESS != cuda_Result)
{
// Error - cudaGetDeviceCount() failed
fprintf( stderr, "InitCuda() - cuDeviceGetCount() failed, error = %x in file '%s' in line %i.\n",
cuda_Result, __FILE__, __LINE__ );
goto lblError;
}
if (nDevices <= 0)
{
// No Valid Display Device found
cuda_Result = CUDA_ERROR_INVALID_DEVICE;
fprintf( stderr, "InitCuda() - no valid display device found, error = %x in file '%s' in line %i.\n",
cuda_Result, __FILE__, __LINE__ );
goto lblError;
}
else if (nDevices >= 2)
{
deviceToUse = 1;
}
// Get Specified Device
cuda_Result = cuDeviceGet( &(g.currDevice), deviceToUse );
if (CUDA_SUCCESS != cudaResult )
{
// Error - cudaDeviceGet() failed
fprintf( stderr, "InitCuda() - cuDeviceGet() failed, error = %x in file '%s' in line %i.\n",
cuda_Result, __FILE__, __LINE__ );
goto lblError;
}
// Get RAW Device Properties
cuda_Result = cuDeviceGetProperties( &(g.rawProps), g.currDevice );
if (CUDA_SUCCESS != cuda_Result )
{
// Error - cudaDeviceGet() failed
fprintf( stderr, "InitCuda() - cuDeviceGetProperties() failed, error = %x in file '%s' in line %i.\n",
cuda_Result, __FILE__, __LINE__ );
goto lblError;
}
// Set up the CUDA context
cuda_Result = cuCtxCreate( &g.currContext, cudaContextFlags, g.currDevice );
if ( CUDA_SUCCESS != cuda_Result )
{
// Error - cudaDeviceGet() failed
fprintf( stderr, "InitCuda() - cuCtxCreate() failed, error = %x in file '%s' in line %i.\n",
cuda_Result, __FILE__, __LINE__ );
goto lblError;
}
// Get CUDA Display Device Properties
cudaResult = cudaGetDeviceProperties( &(g.cudaProps) , deviceToUse );
if ( cudaSuccess != cudaResult )
{
// Error - cudaDeviceGet() failed
fprintf( stderr, "InitCuda() - cudaGetDeviceProperties() failed, error = %x in file '%s' in line %i.\n",
cudaResult, __FILE__, __LINE__ );
goto lblError;
}
#elif (CUDA_PLATFORM == CUDA_CUDA)
// Pick Display Device to perform GPU calculations on...
cudaResult = cudaGetDeviceCount( &nDevices );
if ( cudaSuccess != cudaResult )
{
// Error - cudaGetDeviceCount() failed
fprintf( stderr, "InitCuda() - cudaGetDeviceCount() failed, error = %x in file '%s' in line %i.\n",
cudaResult, __FILE__, __LINE__ );
goto lblError;
}
// Note: Assumes Device 0 = primary display device
// Assumes Device 1 = work horse for CUDA
if (nDevices <= 0)
{
// No Valid Display Device found
cudaResult = cudaErrorInvalidDevice;
fprintf( stderr, "InitCuda() - no valid display device found, error = %x in file '%s' in line %i.\n",
cudaResult, __FILE__, __LINE__ );
goto lblError;
}
else if (nDevices >= 2)
{
deviceToUse = 1;
}
// Get Display Device Properties
cudaResult = cudaGetDeviceProperties( &(g.cudaProps) , deviceToUse );
if ( cudaSuccess != cudaResult )
{
// Error - cudaDeviceGet() failed
fprintf( stderr, "InitCuda() - cudaGetDeviceProperties() failed, error = %x in file '%s' in line %i.\n",
cudaResult, __FILE__, __LINE__ );
goto lblError;
}
// Setup Display Device
cudaResult = cudaSetDevice( deviceToUse );
if ( cudaSuccess != cudaResult )
{
// Error - cudaDeviceGet() failed
fprintf( stderr, "InitCuda() - cudaSetDevice() failed, error = %x in file '%s' in line %i.\n",
cudaResult, __FILE__, __LINE__ );
goto lblError;
}
#endif // CUDA_CUDA
// Success
bResult = true;
lblError:
return bResult;
}
//---------------------------------------------------------
// Name: FiniCUDA
// Desc: Cleanup CUDA system
//---------------------------------------------------------
bool FiniCUDA()
{
#if (CUDA_PLATFORM == CUDA_DEVICE)
// Detach CUDA from current thread
CUresult cuda_Result = CUDA_SUCCESS;
cuda_Result = cuCtxDetach( g_app.currContext );
if (CUDA_SUCCESS != cuda_Result)
{
// Error - cuCtxDetach() failed
fprintf( stderr, "FiniCUDA() - cuCtxDetach() failed, error = %x in file '%s' in line %i.\n",
cuda_Result, __FILE__, __LINE__ );
return false;
}
#endif
// Success
return true;
}
/*---------------------------------------------------------
Name: InitGlobals
Desc: Initialize Application Globals to Default
---------------------------------------------------------*/
bool InitGlobals( AppGlobals & g )
{
//
// Set Defaults
//
// Search Vectors
g.nSearch = 100;
g.searchList = NULL;
g.nQuery = 10;
g.queryList = NULL;
// Cuda Properties
size_t byteSize;
#if (CUDA_PLATFORM == CUDA_DEVICE)
g.currDevice = 0;
// Initialize cuda device props to zero
byteSize = sizeof( g.rawProps );
memset( &g.rawProps, 0, byteSize );
#endif
// Initialize cuda props to zero
byteSize = sizeof( g.cudaProps );
memset( &g.cudaProps, 0, byteSize );
// Init Block Grid Shape
InitShapeDefaults( g.bgShape );
// App Properties
g.nopromptOnExit = 0;
g.doubleCheckDists = 1;
// Profiling Info
g.hTimer = 0;
g.profile = 1;
g.profileSkipFirstLast = 0;
g.profileRequestedLoops = 1;
g.profileActualLoops = 1;
return true;
}
/*---------------------------------------------------------
Name: GetCommandLineParameters
Desc:
---------------------------------------------------------*/
bool GetCommandLineParams
(
int argc, // Count of Command Line Parameters
const char** argv, // List of Command Line Parameters
AppGlobals & g // Structure to store results in
)
{
int iVal;
// Prompt before exiting application ?!?
if (cutCheckCmdLineFlag( argc, argv, "noprompt") )
{
g.nopromptOnExit = true;
}
else
{
g.nopromptOnExit = false;
}
// Double Check Distances
if (cutCheckCmdLineFlag( argc, argv, "cdist") )
{
g.doubleCheckDists = true;
}
else
{
g.doubleCheckDists = false;
}
// Double Check Min
if (cutCheckCmdLineFlag( argc, argv, "cmin") )
{
g.doubleCheckMin = true;
}
else
{
g.doubleCheckMin = false;
}
// Get # Threads Per Row (block shape)
if (cutGetCmdLineArgumenti( argc, argv, "TPR", &iVal ))
{
if (iVal < 1) { iVal = 1; }
g.bgShape.threadsPerRow = iVal;
}
// Get # Rows Per Block
if (cutGetCmdLineArgumenti( argc, argv, "RPB", &iVal ))
{
if (iVal < 1) { iVal = 1; }
g.bgShape.rowsPerBlock = iVal;
}
// Calculate Threads Per Block
g.bgShape.threadsPerBlock = g.bgShape.threadsPerRow * g.bgShape.rowsPerBlock;
if (g.bgShape.threadsPerBlock > 512)
{
// Error - Can't have more than 512 threads per block
printf( "Max Threads Per Block is 512!!!\n\n" );
return false;
}
// Get search Vector Length
if (cutGetCmdLineArgumenti( argc, argv, "N", &iVal ))
{
if (iVal < 1) { iVal = 10000; }
g.nSearch = (int)iVal;
}
// Get Query Vector Length
if (cutGetCmdLineArgumenti( argc, argv, "NQ", &iVal ))
{
if (iVal < 1) { iVal = 100; }
g.nQuery = (int)iVal;
}
// Should we do profiling (performance measurements) on application
if (cutCheckCmdLineFlag( argc, argv, "profile") )
{
g.profile = true;
}
else
{
g.profile = false;
}
if (g.profile)
{
// Get Skip First Last flag
if (cutCheckCmdLineFlag( argc, argv, "skip") )
{
g.profileSkipFirstLast = true;
}
else
{
g.profileSkipFirstLast = false;
}
// Get Number of Iterations for Profiling performance
if (cutGetCmdLineArgumenti( argc, argv, "profile", &iVal ))
{
if (iVal < 1) { iVal = 100; }
g.profileRequestedLoops = iVal;
if (g.profileSkipFirstLast)
{
g.profileActualLoops = g.profileRequestedLoops + 2;
}
else
{
g.profileActualLoops = g.profileRequestedLoops;
}
}
}
// Success
return true;
}
/*---------------------------------------------------------
Name: InitShapeDefaults
---------------------------------------------------------*/
void InitShapeDefaults( BlockGridShape & bgShape )
{
// Default Thread, Grid, Vector Properties
bgShape.nElems = 100;
bgShape.threadsPerRow = 1;
bgShape.rowsPerBlock = 1;
bgShape.threadsPerBlock = bgShape.threadsPerRow * bgShape.rowsPerBlock;
bgShape.blocksPerRow = 100;
bgShape.rowsPerGrid = 1;
bgShape.blocksPerGrid = bgShape.blocksPerRow * bgShape.rowsPerGrid;
bgShape.W = bgShape.threadsPerRow * bgShape.blocksPerRow;
bgShape.H = bgShape.rowsPerBlock * bgShape.rowsPerGrid;
bgShape.nPadded = bgShape.W * bgShape.H;
}
/*---------------------------------------------------------
Name: DumpBlockGridShape
---------------------------------------------------------*/
void DumpBlockGridShape( BlockGridShape & bgShape )
{
printf( "N = %d, NPadded = %d\n",
bgShape.nElems, bgShape.nPadded );
printf( "Block (%d TPR x %d RPB) = %d TPB\n",
bgShape.threadsPerRow, bgShape.rowsPerBlock,
bgShape.threadsPerBlock );
printf( "Grid (%d BPR x %d RPG) = %d BPG\n",
bgShape.blocksPerRow, bgShape.rowsPerGrid,
bgShape.blocksPerGrid );
printf( "W = %d, H = %d\n",
bgShape.W, bgShape.H );
}
|
7acaf301df9774cf2c77caa88550c068023f4310.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "common.h"
#include "naive.h"
namespace StreamCompaction {
namespace Naive {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
// TODO:
__global__ void kernNaiveScan(const int n, const int d, int* odata, const int* idata) {
/*1: for d = 1 to log2 n do
2 :   for all k in parallel do
3 :     if k >= 2^(d-1) then
4 :       x[out][k] = x[in][k - 2^(d-1)] + x[in][k]
5 :     else
6 :       x[out][k] = x[in][k]*/
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= n) {
return;
}
int dPow = powf(2, d - 1);
if (index >= dPow) {
odata[index] = idata[index - dPow] + idata[index];
}
else {
odata[index] = idata[index];
}
}
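// Worked example (illustrative): idata = {1, 2, 3, 4}
//   d = 1 (offset 1): {1, 1+2, 2+3, 3+4} = {1, 3, 5, 7}
//   d = 2 (offset 2): {1, 3, 1+5, 3+7} = {1, 3, 6, 10}   (inclusive scan)
// scan() below shifts this right by one element when copying back to the
// host, so the exclusive result returned to the caller is {0, 1, 3, 6}.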
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
int blockSize = 128;
dim3 fullBlocksPerGrid((n + blockSize - 1) / blockSize);
int* device_idata;
int* device_odata;
hipMalloc((void**)&device_idata, n * sizeof(int));
hipMalloc((void**)&device_odata, n * sizeof(int));
hipMemcpy(device_idata, idata, n * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(device_odata, idata, n * sizeof(int), hipMemcpyHostToDevice);
timer().startGpuTimer();
// TODO
for (int d = 1; d <= ilog2ceil(n); d++) {
kernNaiveScan << < fullBlocksPerGrid, blockSize >> > (n, d, device_odata, device_idata);
int* temp = device_idata;
device_idata = device_odata;
device_odata = temp;
}
timer().endGpuTimer();
hipDeviceSynchronize();
odata[0] = 0; // exclusive scan: the first output element is the identity
hipMemcpy(odata + 1, device_idata, (n - 1) * sizeof(int), hipMemcpyDeviceToHost);
hipFree(device_idata);
hipFree(device_odata);
}
}
}
//__global__ void scan(float* g_odata, float* g_idata, int n) {
// extern __shared__ float temp[]; // allocated on invocation
// int thid = threadIdx.x;
// int pout = 0, pin = 1; // Load input into shared memory.
// // This is exclusive scan, so shift right by one
// // and set first element to 0
// temp[pout*n + thid] = (thid > 0) ? g_idata[thid-1] : 0;
// __syncthreads();
// for (int offset = 1; offset < n; offset *= 2)
// {
// pout = 1 - pout; // swap double buffer indices
// pin = 1 - pout;
// if (thid >= offset)
// temp[pout*n+thid] += temp[pin*n+thid - offset];
// else
// temp[pout*n+thid] = temp[pin*n+thid];
// __syncthreads();
// }
// g_odata[thid] = temp[pout*n+thid]; // write output
//} | 7acaf301df9774cf2c77caa88550c068023f4310.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include "common.h"
#include "naive.h"
namespace StreamCompaction {
namespace Naive {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
// TODO:
__global__ void kernNaiveScan(const int n, const int d, int* odata, const int* idata) {
/*1: for d = 1 to log2 n do
2 :   for all k in parallel do
3 :     if k >= 2^(d-1) then
4 :       x[out][k] = x[in][k - 2^(d-1)] + x[in][k]
5 :     else
6 :       x[out][k] = x[in][k]*/
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= n) {
return;
}
int dPow = powf(2, d - 1);
if (index >= dPow) {
odata[index] = idata[index - dPow] + idata[index];
}
else {
odata[index] = idata[index];
}
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
int blockSize = 128;
dim3 fullBlocksPerGrid((n + blockSize - 1) / blockSize);
int* device_idata;
int* device_odata;
cudaMalloc((void**)&device_idata, n * sizeof(int));
cudaMalloc((void**)&device_odata, n * sizeof(int));
cudaMemcpy(device_idata, idata, n * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(device_odata, idata, n * sizeof(int), cudaMemcpyHostToDevice);
timer().startGpuTimer();
// TODO
for (int d = 1; d <= ilog2ceil(n); d++) {
kernNaiveScan << < fullBlocksPerGrid, blockSize >> > (n, d, device_odata, device_idata);
int* temp = device_idata;
device_idata = device_odata;
device_odata = temp;
}
timer().endGpuTimer();
cudaThreadSynchronize();
odata[0] = 0; // exclusive scan: the first output element is the identity
cudaMemcpy(odata + 1, device_idata, (n - 1) * sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(device_idata);
cudaFree(device_odata);
}
}
}
//__global__ void scan(float* g_odata, float* g_idata, int n) {
// extern __shared__ float temp[]; // allocated on invocation
// int thid = threadIdx.x;
// int pout = 0, pin = 1; // Load input into shared memory.
// // This is exclusive scan, so shift right by one
// // and set first element to 0
// temp[pout*n + thid] = (thid > 0) ? g_idata[thid-1] : 0;
// __syncthreads();
// for (int offset = 1; offset < n; offset *= 2)
// {
// pout = 1 - pout; // swap double buffer indices
// pin = 1 - pout;
// if (thid >= offset)
// temp[pout*n+thid] += temp[pin*n+thid - offset];
// else
// temp[pout*n+thid] = temp[pin*n+thid];
// __syncthreads();
// }
// g_odata[thid] = temp[pout*n+thid]; // write output
//} |
main_gpu.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
/**************************************************************
The code in time.h is a part of a course on cuda taught by its authors:
Lokman A. Abbas-Turki
**************************************************************/
#include "timer.h"
// Compare function for qsort
int compare_function(const void *a,const void *b) {
float *x = (float *) a;
float *y = (float *) b;
if (*x < *y) return - 1;
else if (*x > *y) return 1;
return 0;
}
// Generate gaussian vector using Box Muller
void gaussian_vector(float *v, float mu, float sigma, int n) {
for (int i = 0; i<n; i++){
float u1 = (float)rand()/(float)(RAND_MAX);
float u2 = (float)rand()/(float)(RAND_MAX);
v[i] = sigma * (sqrtf( -2 * logf(u1)) * cosf(2 * M_PI * u2)) + mu;
}
}
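// Note (suggested hardening, not in the original code): rand() can return 0,
// in which case logf(u1) is -inf and the generated sample is not finite.
// A common guard is
//     float u1 = ((float)rand() + 1.0f) / ((float)RAND_MAX + 2.0f);
// which keeps u1 strictly inside (0, 1).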
//Function to print a small vector of floats on host
void print_vector(float *c, int m, int n) {
for (int i=0; i<m; i++){
printf("%f ", c[i]);
printf("\n");
}
}
// Kernel for computing the square of a vector (INPLACE)
// We actually only need z ** 2 in the computations and not z
// The square norm is also computed
__global__ void square_kernel(float *zsqrGPU, float *znorm, int n){
int idx = threadIdx.x + blockIdx.x * blockDim.x;
while(idx < n){
float zi = zsqrGPU[idx];
float zsqr_i = zi * zi;
zsqrGPU[idx] = zi * zi;
atomicAdd(znorm, zsqr_i);
idx += gridDim.x * blockDim.x;
}
}
// Device function for computing f (the secular function of interest) at a given point x
__device__ float secfunc(float *dGPU, float *zsqrGPU, float rho, float x, int n) {
float sum = 0;
for (int i=0; i < n; i++){
sum += zsqrGPU[i] / (dGPU[i] - x);
}
return rho + sum;
}
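// For reference, the secular function evaluated above is
//     f(x) = rho + sum_i z_i^2 / (d_i - x)
// (zsqrGPU already stores z_i^2). In the usual rank-one-update setting its n
// roots interlace the d_i, which is why each root can be bracketed in its own
// interval (d_k, d_k+1), plus one exterior interval beyond d_{n-1}.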
// Device function for computing f' (the prime derivative of the secular function of interest) at a given point x
__device__ float secfunc_prime(float *dGPU, float *zsqrGPU, float x, int n) {
float sum = 0;
for (int i=0; i < n; i++){
float di = dGPU[i]; // d values are floats; declaring di as int would truncate them
sum += zsqrGPU[i] / ((di - x) * (di - x));
}
return sum;
}
// Device function for computing f'' (the second derivative of the secular function of interest)
__device__ float secfunc_second(float *dGPU, float *zsqrGPU, float x, int n){
float sum = 0;
for (int i = 0; i < n; i++) {
float di = dGPU[i];
sum += zsqrGPU[i] / ((di - x) * (di - x) * (di - x));
}
return 2 * sum;
}
// Useful intermediary function, see equations (30) and (31) from Li's paper on page 13 and equation (42) on page 20
__device__ float discrimant_int(float a, float b, float c){
if (a <= 0) return (a - sqrtf(a * a - 4 * b * c)) / (2 * c);
else return (2 * b) / (a + sqrtf(a * a - 4 * b *c));
}
// Useful intermediary function, see equation (46) from Li's paper on page 21
__device__ float discrimant_ext(float a, float b, float c){
if (a >= 0) return (a + sqrtf(a * a - 4 * b * c)) / (2 * c);
else return (2 * b) / (a - sqrtf(a * a - 4 * b *c));
}
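// Each helper above returns one fixed root of the quadratic c*t^2 - a*t + b = 0:
// discrimant_int returns (a - sqrt(a^2 - 4bc)) / (2c) and discrimant_ext returns
// (a + sqrt(a^2 - 4bc)) / (2c). The branch on the sign of a just switches to the
// algebraically equivalent form 2b / (a -/+ sqrt(a^2 - 4bc)) so that a and the
// square root never cancel, which keeps the evaluation numerically stable.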
// h partition of the secular function, used for Initialization
__device__ float h_secfunc(float d_k, float d_kplus1, float zsqr_k, float zsqr_kplus1, float x){
return zsqr_k / (d_k - x) + zsqr_kplus1 / (d_kplus1 - x);
}
// Initialization for interior roots (see section 4 of Li's paper - initial guesses from page 18)
__device__ float initialization_int(float *dGPU, float *zsqrGPU, float rho, int k, int n){
float d_k = dGPU[k];
float d_kplus1 = dGPU[k + 1];
float zsqr_k = zsqrGPU[k];
float zsqr_kplus1 = zsqrGPU[k + 1];
float middle = (d_k + d_kplus1) / 2;
float delta = d_kplus1 - d_k;
float f = secfunc(dGPU, zsqrGPU, rho, middle, n);
float c = f - h_secfunc(d_k, d_kplus1, zsqr_k, zsqr_kplus1, middle);
if (f >= 0){
float a = c * delta + zsqr_k + zsqr_kplus1;
float b = zsqr_k * delta;
return discrimant_int(a, b, c) + d_k;
}
else {
float a = - c * delta + zsqr_k + zsqr_kplus1;
float b = - zsqr_kplus1 * delta;
return discrimant_int(a, b, c) + d_kplus1;
}
}
// Initialization for the exterior root (see section 4 of Li's paper - initial guesses from page 18)
__device__ float initialization_ext(float *dGPU, float *zsqrGPU, float *znorm, float rho, int n){
float d_nminus1 = dGPU[n - 1];
float d_nminus2 = dGPU[n - 2];
float d_n = d_nminus1 + znorm[0] / rho;
float zsqr_nminus1 = zsqrGPU[n - 1];
float zsqr_nminus2 = zsqrGPU[n - 2];
float middle = (d_nminus1 + d_n) / 2;
float f = secfunc(dGPU, zsqrGPU, rho, middle, n);
if (f <= 0){
float hd = h_secfunc(d_nminus2, d_nminus1, zsqr_nminus2, zsqr_nminus1, d_n);
float c = f - h_secfunc(d_nminus2, d_nminus1, zsqr_nminus2, zsqr_nminus1, middle);
if (c <= - hd) {
return d_n;
}
else {
float delta = d_nminus1 - d_nminus2;
float a = - c * delta + zsqr_nminus2 + zsqr_nminus1;
float b = - zsqr_nminus1 * delta;
return discrimant_ext(a, b, c) + d_n;
}
}
else {
float delta = d_nminus1 - d_nminus2;
float c = f - h_secfunc(d_nminus2, d_nminus1, zsqr_nminus2, zsqr_nminus1, middle);
float a = - c * delta + zsqr_nminus2 + zsqr_nminus1;
float b = - zsqr_nminus1 * delta;
return discrimant_ext(a, b, c) + d_n;
}
}
// Computation of a from the paper (page 13)
__device__ float a_gragg(float f, float fprime, float delta_k, float delta_kplus1){
return (delta_k + delta_kplus1) * f - delta_k * delta_kplus1 * fprime;
}
// Computation of b from the paper (page 13)
__device__ float b_gragg(float f, float delta_k, float delta_kplus1){
return delta_k * delta_kplus1 * f;
}
// Computation of c from the section Gragg of the paper (page 15)
__device__ float c_gragg(float f, float fprime, float fsecond, float delta_k, float delta_kplus1){
return f - (delta_k + delta_kplus1) * fprime + delta_k * delta_kplus1 * fsecond / 2.0;
}
// Compute of the update for x (eta) for the interior roots (see section 3.1 - Iteration fomulas, pages 12 and 13)
__device__ float eta_int(float d_k, float d_kplus1, float f, float fprime, float fsecond, float x, int k, int n){
float delta_k = d_k - x;
float delta_kplus1 = d_kplus1 - x;
float a = a_gragg(f, fprime, delta_k, delta_kplus1);
float b = b_gragg(f, delta_k, delta_kplus1);
float c = c_gragg(f, fprime, fsecond, delta_k, delta_kplus1);
float eta = discrimant_int(a, b, c);
return eta;
}
// Compute of the update of x (+eta) for the exterior root
__device__ float eta_ext(float d_nminus2, float d_nminus1, float f, float fprime, float fsecond, float x, int n){
float delta_nminus2 = d_nminus2 - x;
float delta_nminus1 = d_nminus1 - x;
float a = a_gragg(f, fprime, delta_nminus2, delta_nminus1);
float b = b_gragg(f, delta_nminus2, delta_nminus1);
float c = c_gragg(f, fprime, fsecond, delta_nminus2, delta_nminus1);
float eta = discrimant_ext(a, b, c);
return eta;
}
// Iterate to find the k-th interior root
__device__ float find_root_int(float *dGPU, float *zsqrGPU, float rho, float x, int k, int n, int maxit, float epsilon){
int i = 0;
float f = secfunc(dGPU, zsqrGPU, rho, x, n);
float d_k = dGPU[k];
float d_kplus1 = dGPU[k + 1];
while ((i < maxit) && (fabsf(f) > epsilon)){
f = secfunc(dGPU, zsqrGPU, rho, x, n);
float fprime = secfunc_prime(dGPU, zsqrGPU, x, n);
float fsecond = secfunc_second(dGPU, zsqrGPU, x, n);
float eta = eta_int(d_k, d_kplus1, f, fprime, fsecond, x, k, n);
x += eta;
i ++;
}
// Print eigenvalue regularly to check their value and the associated spectral function
if (k%(int)(n/10) == 0){
printf("eigenvalue %d: %f, with spectral function %f after %d iterations \n", k, x, f, i);
}
return x;
}
// Iterate to find the last root (the exterior one)
__device__ float find_root_ext(float *dGPU, float *zsqrGPU, float rho, float x, int n, int maxit, float epsilon){
int i = 0;
float d_nminus2 = dGPU[n - 2];
float d_nminus1 = dGPU[n - 1];
float f = secfunc(dGPU, zsqrGPU, rho, x, n);
while ((i < maxit) && (fabsf(f) > epsilon)){
f = secfunc(dGPU, zsqrGPU, rho, x, n);
float fprime = secfunc_prime(dGPU, zsqrGPU, x, n);
float fsecond = secfunc_second(dGPU, zsqrGPU, x, n);
float eta = eta_ext(d_nminus2, d_nminus1, f, fprime, fsecond, x, n);
x += eta;
i ++;
}
// Print the last eigen value
printf("eigenvalue %d: %f, with spectral function %f after %d iterations \n", n - 1, x, f, i);
return x;
}
// Kernel to launch and distribute the searching of roots among GPU cores
__global__ void find_roots_kernel(float *xstarGPU, float *x0GPU, float *dGPU, float *zsqrGPU, float *znorm, float rho, int n, int maxit, float epsilon){
// We define shared variables for values that are used by multiple threads
__shared__ float rho_shared, epsilon_shared;
__shared__ int n_shared, maxit_shared;
rho_shared = rho;
epsilon_shared = epsilon;
n_shared = n;
maxit_shared = maxit;
int idx = threadIdx.x + blockIdx.x * blockDim.x;
// First core gets search of the last root (the exterior one)
if (idx == 0){
float x = x0GPU[n - 1];
xstarGPU[n - 1] = find_root_ext(dGPU, zsqrGPU, rho_shared, x, n_shared, maxit_shared, epsilon_shared);
}
// Each next core searches one interval (interior interval)
else {
while (idx < n) {
float x = x0GPU[idx - 1];
xstarGPU[idx - 1] = find_root_int(dGPU, zsqrGPU, rho_shared, x, idx - 1, n_shared, maxit_shared, epsilon_shared);
// in case we have not launched enough cores to cover all intervals
idx += gridDim.x * blockDim.x;
}
}
}
// Kernel to compute the initial guesses from the paper on GPU
__global__ void initialize_x0_kernel(float *x0GPU, float *dGPU, float *zsqrGPU, float *znorm, float rho, int n){
// We define shared variables for values that are used by multiple threads
__shared__ float znormGPU_shared, rho_shared;
__shared__ int n_shared;
znormGPU_shared = *znorm;
rho_shared = rho;
n_shared = n;
int idx = threadIdx.x + blockIdx.x * blockDim.x;
// First core compute the initial guess for last root (the exterior one)
if (idx == 0){
x0GPU[n - 1] = initialization_ext(dGPU, zsqrGPU, &znormGPU_shared, rho_shared, n_shared);
}
// Each next core compute initial guess for one interval (interior interval)
else {
while (idx < n) {
x0GPU[idx - 1] = initialization_int(dGPU, zsqrGPU, rho_shared, idx - 1, n_shared);
idx += gridDim.x * blockDim.x;
}
}
}
// Kernel to "wake up" the GPU
__global__ void wake_up(int *test){
__shared__ int c;
c = 3;
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if(idx < 1024)
{
test[idx] += c;
}
}
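// The first kernel launch on a device absorbs one-off costs (context creation,
// module/JIT load), so running this throwaway kernel before the timed section
// keeps that overhead out of the measured root-finding time.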
int main (void) {
/****************** Declaration ******************/
// Declare vectors or floats
float *d, *z, *xstar;
// rho parameter
float rho = 10;
// Size of arrow matrix chosen by the user
int n;
printf("\nWhich n (number of roots for the function) do you want? \n");
scanf("%d", &n);
printf("\n \n******************* CHOICE OF N ******************** \n");
printf("n = %d\n", n);
/************* Hyperparameters setting **************/
//Maximum number of iterations
int maxit = 1e4;
//Stopping criterion
float epsilon = 1e-6;
/***************** Data generation *****************/
// Memory allocation
d = (float*)malloc(n*sizeof(float));
z = (float*)malloc(n*sizeof(float));
xstar = (float*)malloc(n*sizeof(float));
// Create instance of class Timer
Timer Tim;
//Fill the vector d with linear function of n
for (int i=0; i < n; i++){
d[i] = 2 * n - i;
}
// sort the vector in ascending order
qsort(d, n, sizeof(float), compare_function);
// Gaussian rank 1 perturbation
float mu_z = 5;
float sigma_z = 1;
gaussian_vector(z, mu_z, sigma_z, n);
/**************** Wake Up GPU *****************/
// We first wake up the GPU
int *testGPU;
hipMalloc(&testGPU, 1024*sizeof(int));
hipLaunchKernelGGL(( wake_up) , dim3(1024), dim3(512), 0, 0, testGPU);
hipFree(testGPU);
/**************** Information Display *****************/
printf("\n\n**************************************************** \n");
printf("*********************** GPU ************************ \n");
printf("**************************************************** \n\n\n");
printf("********************* CONTROLS ********************* \n");
printf("We print the first, the last and 10 %% of the interior eigenvalues as a check \n");
/***************** GPU memory alloc *****************/
// Start timer
Tim.start();
// Declare vectors on GPU
float *dGPU, *zsqrGPU, *znorm, *x0GPU, *xstarGPU;
// Create memory space for vectors on GPU
hipMalloc(&dGPU, n*sizeof(float));
hipMalloc(&zsqrGPU, n*sizeof(float));
hipMalloc(&znorm, sizeof(float));
hipMalloc(&x0GPU, n*sizeof(float));
// Container for the results
hipMalloc(&xstarGPU, n*sizeof(float));
/***************** Transfer on GPU *****************/
// Transfers on GPU
hipMemcpy(dGPU, d, n*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(zsqrGPU, z, n*sizeof(float), hipMemcpyHostToDevice);
// We first compute the square and squared norm
hipLaunchKernelGGL(( square_kernel) , dim3(1024), dim3(512), 0, 0, zsqrGPU, znorm, n);
// Initialization of x0 on GPU
hipLaunchKernelGGL(( initialize_x0_kernel) , dim3(1024), dim3(512), 0, 0, x0GPU, dGPU, zsqrGPU, znorm, rho, n);
/***************** Root computation ****************/
// Find roots on GPU
hipLaunchKernelGGL(( find_roots_kernel) , dim3(1024), dim3(512), 0, 0, xstarGPU, x0GPU, dGPU, zsqrGPU, znorm, rho, n, maxit, epsilon);
// Transfer results on CPU to print it
hipMemcpy(xstar, xstarGPU, n*sizeof(float), hipMemcpyDeviceToHost);
// End timer
Tim.add();
// Print the first zeros
// Number of roots to display
int m = 10;
printf("\n********************* RESULTS ********************** \n");
printf("The first %i resulting roots (eigen values) are : \n", m);
print_vector(xstar, m, n);
// Print how long it took
printf("GPU timer for root finding (CPU-GPU and GPU-CPU transfers included) : %f s\n\n", (float)Tim.getsum());
//print_vector(x0_vec, 10, n);
/***************** Freeing Memory ****************/
// Free memory on GPU
hipFree(dGPU);
hipFree(zsqrGPU);
hipFree(znorm);
hipFree(x0GPU);
hipFree(xstarGPU);
// Free memory on CPU
free(d);
free(z);
free(xstar);
}
| main_gpu.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
/**************************************************************
The code in time.h is a part of a course on cuda taught by its authors:
Lokman A. Abbas-Turki
**************************************************************/
#include "timer.h"
// Compare function for qsort
int compare_function(const void *a,const void *b) {
float *x = (float *) a;
float *y = (float *) b;
if (*x < *y) return - 1;
else if (*x > *y) return 1;
return 0;
}
// Generate gaussian vector using Box Muller
void gaussian_vector(float *v, float mu, float sigma, int n) {
for (int i = 0; i<n; i++){
float u1 = (float)rand()/(float)(RAND_MAX);
float u2 = (float)rand()/(float)(RAND_MAX);
v[i] = sigma * (sqrtf( -2 * logf(u1)) * cosf(2 * M_PI * u2)) + mu;
}
}
//Function to print a small vector of floats on host
void print_vector(float *c, int m, int n) {
for (int i=0; i<m; i++){
printf("%f ", c[i]);
printf("\n");
}
}
// Kernel for computing the square of a vector (INPLACE)
// We actually only need z ** 2 in the computations and not z
// The square norm is also computed
__global__ void square_kernel(float *zsqrGPU, float *znorm, int n){
int idx = threadIdx.x + blockIdx.x * blockDim.x;
while(idx < n){
float zi = zsqrGPU[idx];
float zsqr_i = zi * zi;
zsqrGPU[idx] = zi * zi;
atomicAdd(znorm, zsqr_i);
idx += gridDim.x * blockDim.x;
}
}
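// The device functions below evaluate and solve the secular equation
//     f(x) = rho + sum_i zsqr_i / (d_i - x) = 0
// whose roots are the eigenvalues printed by main(). With rho > 0 and the d_i
// sorted in ascending order, f is strictly increasing between its poles, so there
// is exactly one root in each interval (d_k, d_kplus1) plus one exterior root to
// the right of d_{n-1}; that is how the work is split across threads further down.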
// Device function for computing f (the secular function of interest) at a given point x
__device__ float secfunc(float *dGPU, float *zsqrGPU, float rho, float x, int n) {
float sum = 0;
for (int i=0; i < n; i++){
sum += zsqrGPU[i] / (dGPU[i] - x);
}
return rho + sum;
}
// Device function for computing f' (the prime derivative of the secular function of interest) at a given point x
__device__ float secfunc_prime(float *dGPU, float *zsqrGPU, float x, int n) {
float sum = 0;
for (int i=0; i < n; i++){
float di = dGPU[i]; // keep as float for consistency with secfunc_second (int would truncate general d values)
sum += zsqrGPU[i] / ((di - x) * (di - x));
}
return sum;
}
// Device function for computing f'' (the second derivative of the secular function of interest)
__device__ float secfunc_second(float *dGPU, float *zsqrGPU, float x, int n){
float sum = 0;
for (int i = 0; i < n; i++) {
float di = dGPU[i];
sum += zsqrGPU[i] / ((di - x) * (di - x) * (di - x));
}
return 2 * sum;
}
// Useful intermediary function, see equations (30) and (31) from Li's paper on page 13 and equation (42) on page 20
__device__ float discrimant_int(float a, float b, float c){
if (a <= 0) return (a - sqrtf(a * a - 4 * b * c)) / (2 * c);
else return (2 * b) / (a + sqrtf(a * a - 4 * b *c));
}
// Useful intermediary function, see equation (46) from Li's paper on page 21
__device__ float discrimant_ext(float a, float b, float c){
if (a >= 0) return (a + sqrtf(a * a - 4 * b * c)) / (2 * c);
else return (2 * b) / (a - sqrtf(a * a - 4 * b *c));
}
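// Both helpers above solve the quadratic c*eta^2 - a*eta + b = 0 for eta.
// Within each helper the two branches are the same root written two equivalent
// ways (e.g. (a - sqrtf(a*a - 4*b*c)) / (2*c) == 2*b / (a + sqrtf(a*a - 4*b*c)));
// the branch is picked from the sign of a so the numerator never subtracts two
// nearly equal quantities, avoiding catastrophic cancellation.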
// h partition of the secular function, used for Initialization
__device__ float h_secfunc(float d_k, float d_kplus1, float zsqr_k, float zsqr_kplus1, float x){
return zsqr_k / (d_k - x) + zsqr_kplus1 / (d_kplus1 - x);
}
// Initialization for interior roots (see section 4 of Li's paper - initial guesses from page 18)
__device__ float initialization_int(float *dGPU, float *zsqrGPU, float rho, int k, int n){
float d_k = dGPU[k];
float d_kplus1 = dGPU[k + 1];
float zsqr_k = zsqrGPU[k];
float zsqr_kplus1 = zsqrGPU[k + 1];
float middle = (d_k + d_kplus1) / 2;
float delta = d_kplus1 - d_k;
float f = secfunc(dGPU, zsqrGPU, rho, middle, n);
float c = f - h_secfunc(d_k, d_kplus1, zsqr_k, zsqr_kplus1, middle);
if (f >= 0){
float a = c * delta + zsqr_k + zsqr_kplus1;
float b = zsqr_k * delta;
return discrimant_int(a, b, c) + d_k;
}
else {
float a = - c * delta + zsqr_k + zsqr_kplus1;
float b = - zsqr_kplus1 * delta;
return discrimant_int(a, b, c) + d_kplus1;
}
}
// Initialization for the exterior root (see section 4 of Li's paper - initial guesses from page 18)
__device__ float initialization_ext(float *dGPU, float *zsqrGPU, float *znorm, float rho, int n){
float d_nminus1 = dGPU[n - 1];
float d_nminus2 = dGPU[n - 2];
float d_n = d_nminus1 + znorm[0] / rho;
float zsqr_nminus1 = zsqrGPU[n - 1];
float zsqr_nminus2 = zsqrGPU[n - 2];
float middle = (d_nminus1 + d_n) / 2;
float f = secfunc(dGPU, zsqrGPU, rho, middle, n);
if (f <= 0){
float hd = h_secfunc(d_nminus2, d_nminus1, zsqr_nminus2, zsqr_nminus1, d_n);
float c = f - h_secfunc(d_nminus2, d_nminus1, zsqr_nminus2, zsqr_nminus1, middle);
if (c <= - hd) {
return d_n;
}
else {
float delta = d_nminus1 - d_nminus2;
float a = - c * delta + zsqr_nminus2 + zsqr_nminus1;
float b = - zsqr_nminus1 * delta;
return discrimant_ext(a, b, c) + d_n;
}
}
else {
float delta = d_nminus1 - d_nminus2;
float c = f - h_secfunc(d_nminus2, d_nminus1, zsqr_nminus2, zsqr_nminus1, middle);
float a = - c * delta + zsqr_nminus2 + zsqr_nminus1;
float b = - zsqr_nminus1 * delta;
return discrimant_ext(a, b, c) + d_n;
}
}
// Computation of a from the paper (page 13)
__device__ float a_gragg(float f, float fprime, float delta_k, float delta_kplus1){
return (delta_k + delta_kplus1) * f - delta_k * delta_kplus1 * fprime;
}
// Computation of b from the paper (page 13)
__device__ float b_gragg(float f, float delta_k, float delta_kplus1){
return delta_k * delta_kplus1 * f;
}
// Computation of c from the section Gragg of the paper (page 15)
__device__ float c_gragg(float f, float fprime, float fsecond, float delta_k, float delta_kplus1){
return f - (delta_k + delta_kplus1) * fprime + delta_k * delta_kplus1 * fsecond / 2.0;
}
// Computation of the update for x (eta) for the interior roots (see section 3.1 - Iteration formulas, pages 12 and 13)
__device__ float eta_int(float d_k, float d_kplus1, float f, float fprime, float fsecond, float x, int k, int n){
float delta_k = d_k - x;
float delta_kplus1 = d_kplus1 - x;
float a = a_gragg(f, fprime, delta_k, delta_kplus1);
float b = b_gragg(f, delta_k, delta_kplus1);
float c = c_gragg(f, fprime, fsecond, delta_k, delta_kplus1);
float eta = discrimant_int(a, b, c);
return eta;
}
// Computation of the update of x (x += eta) for the exterior root
__device__ float eta_ext(float d_nminus2, float d_nminus1, float f, float fprime, float fsecond, float x, int n){
float delta_nminus2 = d_nminus2 - x;
float delta_nminus1 = d_nminus1 - x;
float a = a_gragg(f, fprime, delta_nminus2, delta_nminus1);
float b = b_gragg(f, delta_nminus2, delta_nminus1);
float c = c_gragg(f, fprime, fsecond, delta_nminus2, delta_nminus1);
float eta = discrimant_ext(a, b, c);
return eta;
}
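// One iteration of the root-finding scheme therefore reads, for an interior root k:
//   float f = secfunc(dGPU, zsqrGPU, rho, x, n);
//   float fprime = secfunc_prime(dGPU, zsqrGPU, x, n);
//   float fsecond = secfunc_second(dGPU, zsqrGPU, x, n);
//   x += eta_int(dGPU[k], dGPU[k + 1], f, fprime, fsecond, x, k, n);
// which is essentially the loop body of find_root_int / find_root_ext below
// (find_root_ext uses eta_ext for the exterior root instead).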
// Iterate to find the k-th interior root
__device__ float find_root_int(float *dGPU, float *zsqrGPU, float rho, float x, int k, int n, int maxit, float epsilon){
int i = 0;
float f = secfunc(dGPU, zsqrGPU, rho, x, n);
float d_k = dGPU[k];
float d_kplus1 = dGPU[k + 1];
while ((i < maxit) && (fabsf(f) > epsilon)){
f = secfunc(dGPU, zsqrGPU, rho, x, n);
float fprime = secfunc_prime(dGPU, zsqrGPU, x, n);
float fsecond = secfunc_second(dGPU, zsqrGPU, x, n);
float eta = eta_int(d_k, d_kplus1, f, fprime, fsecond, x, k, n);
x += eta;
i ++;
}
// Print eigenvalue regularly to check their value and the associated spectral function
if (k%(int)(n/10) == 0){
printf("eigenvalue %d: %f, with spectral function %f after %d iterations \n", k, x, f, i);
}
return x;
}
// Iterate to find the last root (the exterior one)
__device__ float find_root_ext(float *dGPU, float *zsqrGPU, float rho, float x, int n, int maxit, float epsilon){
int i = 0;
float d_nminus2 = dGPU[n - 2];
float d_nminus1 = dGPU[n - 1];
float f = secfunc(dGPU, zsqrGPU, rho, x, n);
while ((i < maxit) && (fabsf(f) > epsilon)){
f = secfunc(dGPU, zsqrGPU, rho, x, n);
float fprime = secfunc_prime(dGPU, zsqrGPU, x, n);
float fsecond = secfunc_second(dGPU, zsqrGPU, x, n);
float eta = eta_ext(d_nminus2, d_nminus1, f, fprime, fsecond, x, n);
x += eta;
i ++;
}
// Print the last eigen value
printf("eigenvalue %d: %f, with spectral function %f after %d iterations \n", n - 1, x, f, i);
return x;
}
// Kernel to launch and distribute the searching of roots among GPU cores
__global__ void find_roots_kernel(float *xstarGPU, float *x0GPU, float *dGPU, float *zsqrGPU, float *znorm, float rho, int n, int maxit, float epsilon){
// We define shared variables for values that are used by multiple threads
__shared__ float rho_shared, epsilon_shared;
__shared__ int n_shared, maxit_shared;
rho_shared = rho;
epsilon_shared = epsilon;
n_shared = n;
maxit_shared = maxit;
int idx = threadIdx.x + blockIdx.x * blockDim.x;
// First core gets the search for the last root (the exterior one)
if (idx == 0){
float x = x0GPU[n - 1];
xstarGPU[n - 1] = find_root_ext(dGPU, zsqrGPU, rho_shared, x, n_shared, maxit_shared, epsilon_shared);
}
// Each next core searches one interval (interior interval)
else {
while (idx < n) {
float x = x0GPU[idx - 1];
xstarGPU[idx - 1] = find_root_int(dGPU, zsqrGPU, rho_shared, x, idx - 1, n_shared, maxit_shared, epsilon_shared);
// in case we have not launched enough cores to cover all intervals
idx += gridDim.x * blockDim.x;
}
}
}
// Kernel to compute the initial guesses from the paper on GPU
__global__ void initialize_x0_kernel(float *x0GPU, float *dGPU, float *zsqrGPU, float *znorm, float rho, int n){
// We define shared variables for values that are used by multiple threads
__shared__ float znormGPU_shared, rho_shared;
__shared__ int n_shared;
znormGPU_shared = *znorm;
rho_shared = rho;
n_shared = n;
int idx = threadIdx.x + blockIdx.x * blockDim.x;
// First core computes the initial guess for the last root (the exterior one)
if (idx == 0){
x0GPU[n - 1] = initialization_ext(dGPU, zsqrGPU, &znormGPU_shared, rho_shared, n_shared);
}
// Each subsequent core computes the initial guess for one interior interval
else {
while (idx < n) {
x0GPU[idx - 1] = initialization_int(dGPU, zsqrGPU, rho_shared, idx - 1, n_shared);
idx += gridDim.x * blockDim.x;
}
}
}
// Kernel to "wake up" the GPU
__global__ void wake_up(int *test){
__shared__ int c;
c = 3;
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if(idx < 1024)
{
test[idx] += c;
}
}
int main (void) {
/****************** Declaration ******************/
// Declare vectors of floats
float *d, *z, *xstar;
// rho parameter
float rho = 10;
// Size of arrow matrix chosen by the user
int n;
printf("\nWhich n (number of roots for the function) do you want? \n");
scanf("%d", &n);
printf("\n \n******************* CHOICE OF N ******************** \n");
printf("n = %d\n", n);
/************* Hyperparameters setting **************/
//Maximum number of iterations
int maxit = 1e4;
//Stopping criterion
float epsilon = 1e-6;
/***************** Data generation *****************/
// Memory allocation
d = (float*)malloc(n*sizeof(float));
z = (float*)malloc(n*sizeof(float));
xstar = (float*)malloc(n*sizeof(float));
// Create instance of class Timer
Timer Tim;
//Fill the vector d with linear function of n
for (int i=0; i < n; i++){
d[i] = 2 * n - i;
}
// sort the vector in ascending order
qsort(d, n, sizeof(float), compare_function);
// Gaussian rank 1 perturbation
float mu_z = 5;
float sigma_z = 1;
gaussian_vector(z, mu_z, sigma_z, n);
/**************** Wake Up GPU *****************/
// We first wake up the GPU
int *testGPU;
cudaMalloc(&testGPU, 1024*sizeof(int));
wake_up <<<1024, 512>>> (testGPU);
cudaFree(testGPU);
/**************** Information Display *****************/
printf("\n\n**************************************************** \n");
printf("*********************** GPU ************************ \n");
printf("**************************************************** \n\n\n");
printf("********************* CONTROLS ********************* \n");
printf("We print the first, the last and 10 %% of the interior eigenvalues as a check \n");
/***************** GPU memory alloc *****************/
// Start timer
Tim.start();
// Declare vectors on GPU
float *dGPU, *zsqrGPU, *znorm, *x0GPU, *xstarGPU;
// Create memory space for vectors on GPU
cudaMalloc(&dGPU, n*sizeof(float));
cudaMalloc(&zsqrGPU, n*sizeof(float));
cudaMalloc(&znorm, sizeof(float));
cudaMalloc(&x0GPU, n*sizeof(float));
// Container for the results
cudaMalloc(&xstarGPU, n*sizeof(float));
/***************** Transfer on GPU *****************/
// Transfers on GPU
cudaMemcpy(dGPU, d, n*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(zsqrGPU, z, n*sizeof(float), cudaMemcpyHostToDevice);
// Zero the device accumulator before the kernel atomically adds the squared norm into it
cudaMemset(znorm, 0, sizeof(float));
// We first compute the square and squared norm
square_kernel <<<1024, 512>>> (zsqrGPU, znorm, n);
// Initialization of x0 on GPU
initialize_x0_kernel <<<1024, 512>>> (x0GPU, dGPU, zsqrGPU, znorm, rho, n);
/***************** Root computation ****************/
// Find roots on GPU
find_roots_kernel <<<1024, 512>>> (xstarGPU, x0GPU, dGPU, zsqrGPU, znorm, rho, n, maxit, epsilon);
// Transfer results to the CPU to print them
cudaMemcpy(xstar, xstarGPU, n*sizeof(float), cudaMemcpyDeviceToHost);
// End timer
Tim.add();
// Print the first zeros
// Number of roots to display
int m = 10;
printf("\n********************* RESULTS ********************** \n");
printf("The first %i resulting roots (eigen values) are : \n", m);
print_vector(xstar, m, n);
// Print how long it took
printf("GPU timer for root finding (CPU-GPU and GPU-CPU transfers included) : %f s\n\n", (float)Tim.getsum());
//print_vector(x0_vec, 10, n);
/***************** Freeing Memory ****************/
// Free memory on GPU
cudaFree(dGPU);
cudaFree(zsqrGPU);
cudaFree(znorm);
cudaFree(x0GPU);
cudaFree(xstarGPU);
// Free memory on CPU
free(d);
free(z);
free(xstar);
}
|
03f1c4c96426a1833a3db84aef0c1171f4382596.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/Context.h>
#include <ATen/Dispatch.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/BinaryOps.h>
#include <limits>
// NOTE: CUDA 8 does not allow __device__ lambdas (GPU_LAMBDA) to be defined
// inside other lambdas. CUDA on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at { namespace native {
template <typename scalar_t>
void add_kernel_impl(TensorIterator& iter, Scalar alpha_scalar) {
auto alpha = alpha_scalar.to<scalar_t>();
gpu_binary_kernel(iter, [alpha]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return a + alpha * b;
});
}
static void add_kernel_cuda(TensorIterator& iter, Scalar alpha_scalar) {
AT_DISPATCH_ALL_TYPES_AND_HALF(iter.type(), "add", [&]() {
add_kernel_impl<scalar_t>(iter, alpha_scalar);
});
}
static void sub_kernel_cuda(TensorIterator& iter, Scalar alpha_scalar) {
return add_kernel_cuda(iter, -alpha_scalar);
}
template <typename scalar_t>
void div_kernel_impl(TensorIterator& iter) {
gpu_binary_kernel(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return a / b;
});
}
template <typename scalar_t>
void div_constant_impl(TensorIterator& iter, scalar_t inv_b) {
gpu_unary_kernel(iter, [inv_b]GPU_LAMBDA(scalar_t a) -> scalar_t {
return a * inv_b;
});
}
static void div_kernel_cuda(TensorIterator& iter) {
if (isIntegralType(iter.type().scalarType())) {
AT_DISPATCH_INTEGRAL_TYPES(iter.type(), "div", [&]() {
div_kernel_impl<scalar_t>(iter);
});
} else if (iter.is_cpu_scalar(2)) {
// optimization for floating-point types: if the second operand is a CPU
// scalar, compute a * reciprocal(b). Note that this may lose one bit of
// precision compared to computing the division.
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.type(), "div", [&]() {
auto inv_b = scalar_t(1.0 / iter.scalar_value<scalar_t>(2));
iter.remove_operand(2);
div_constant_impl<scalar_t>(iter, inv_b);
});
} else {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.type(), "div", [&]() {
div_kernel_impl<scalar_t>(iter);
});
}
}
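// Note on the scalar path above: inv_b is computed once on the host in double
// precision (1.0 / b) and only then cast down to scalar_t, so each element on the
// GPU performs a single multiply instead of a divide, at the accuracy cost noted
// in the comment above.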
template <typename scalar_t>
void mul_kernel_impl(TensorIterator& iter) {
gpu_binary_kernel(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return a * b;
});
}
static void mul_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_ALL_TYPES_AND_HALF(iter.type(), "mul", [&]() {
mul_kernel_impl<scalar_t>(iter);
});
}
REGISTER_DISPATCH(add_stub, &add_kernel_cuda);
REGISTER_DISPATCH(sub_stub, &sub_kernel_cuda);
REGISTER_DISPATCH(div_stub, &div_kernel_cuda);
REGISTER_DISPATCH(mul_stub, &mul_kernel_cuda);
}} // namespace at::native
| 03f1c4c96426a1833a3db84aef0c1171f4382596.cu | #include <ATen/Context.h>
#include <ATen/Dispatch.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/BinaryOps.h>
#include <limits>
// NOTE: CUDA 8 does not allow __device__ lambdas (GPU_LAMBDA) to be defined
// inside other lambdas. CUDA on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at { namespace native {
template <typename scalar_t>
void add_kernel_impl(TensorIterator& iter, Scalar alpha_scalar) {
auto alpha = alpha_scalar.to<scalar_t>();
gpu_binary_kernel(iter, [alpha]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return a + alpha * b;
});
}
static void add_kernel_cuda(TensorIterator& iter, Scalar alpha_scalar) {
AT_DISPATCH_ALL_TYPES_AND_HALF(iter.type(), "add", [&]() {
add_kernel_impl<scalar_t>(iter, alpha_scalar);
});
}
static void sub_kernel_cuda(TensorIterator& iter, Scalar alpha_scalar) {
return add_kernel_cuda(iter, -alpha_scalar);
}
template <typename scalar_t>
void div_kernel_impl(TensorIterator& iter) {
gpu_binary_kernel(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return a / b;
});
}
template <typename scalar_t>
void div_constant_impl(TensorIterator& iter, scalar_t inv_b) {
gpu_unary_kernel(iter, [inv_b]GPU_LAMBDA(scalar_t a) -> scalar_t {
return a * inv_b;
});
}
static void div_kernel_cuda(TensorIterator& iter) {
if (isIntegralType(iter.type().scalarType())) {
AT_DISPATCH_INTEGRAL_TYPES(iter.type(), "div", [&]() {
div_kernel_impl<scalar_t>(iter);
});
} else if (iter.is_cpu_scalar(2)) {
// optimization for floating-point types: if the second operand is a CPU
// scalar, compute a * reciprocal(b). Note that this may lose one bit of
// precision compared to computing the division.
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.type(), "div", [&]() {
auto inv_b = scalar_t(1.0 / iter.scalar_value<scalar_t>(2));
iter.remove_operand(2);
div_constant_impl<scalar_t>(iter, inv_b);
});
} else {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.type(), "div", [&]() {
div_kernel_impl<scalar_t>(iter);
});
}
}
template <typename scalar_t>
void mul_kernel_impl(TensorIterator& iter) {
gpu_binary_kernel(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return a * b;
});
}
static void mul_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_ALL_TYPES_AND_HALF(iter.type(), "mul", [&]() {
mul_kernel_impl<scalar_t>(iter);
});
}
REGISTER_DISPATCH(add_stub, &add_kernel_cuda);
REGISTER_DISPATCH(sub_stub, &sub_kernel_cuda);
REGISTER_DISPATCH(div_stub, &div_kernel_cuda);
REGISTER_DISPATCH(mul_stub, &mul_kernel_cuda);
}} // namespace at::native
|
ec5963aca3ed05134da5da3d3c14f93e250fef6f.hip | // !!! This is a file automatically generated by hipify!!!
#define RAND(Y,X) Y = Y * 1103515245 +12345;X= (unsigned int)(Y >> 16) & 32767
/*--------------------------------------------------------------------------
Author: Alan Diamond
--------------------------------------------------------------------------
Main entry point for the experiment using the classifier design based on Schmuker 2014 hardware classifier
--------------------------------------------------------------------------*/
#include "experiment.h"
#include <time.h>
#include <algorithm> //for std:find
//for stat command (file info interrogation)
#include <sys/types.h>
#include <sys/stat.h>
#ifndef S_ISDIR
#define S_ISDIR(mode) (((mode) & S_IFMT) == S_IFDIR)
#endif
//this class does most of the work, controlled by this experiment
Schmuker2014_classifier classifier;
//generic parameter struct to allow generic testing and reporting of different parameter settings
typedef struct Parameter {
string name;
string value;
} Parameter;
/*-----------------------------------------------------------------
Utility to determine if named directory exists
-----------------------------------------------------------------*/
bool directoryExists(string const &path) {
struct stat st;
if (stat(path.c_str(), &st) == -1) {
return false;
}
return S_ISDIR(st.st_mode);
}
/*-----------------------------------------------------------------
Utility to create a directory on Linux if not existing
-----------------------------------------------------------------*/
bool createDirectory(string path) {
if (directoryExists(path)) {
cout << "INFO: instructed to create directory " << path << " that already exists. Ignoring.." << endl;
return false;
} else {
string cmd = "mkdir '" + path + "'";
return system(cmd.c_str());
}
}
/*-----------------------------------------------------------------
Utilities to get the average and stdDev from a vector of floats
-----------------------------------------------------------------*/
float getAverage(vector<float> &v)
{
float total = 0.0f;
for (vector<float>::iterator it = v.begin(); it != v.end(); ++it)
total += *it;
return total / v.size();
}
float getStdDev(vector<float> &v, float avg)
{
float totalDiffSquared = 0.0f;
for (vector<float>::iterator it = v.begin(); it != v.end(); ++it) {
float diff = (avg - *it);
totalDiffSquared += diff*diff;
}
float variance = totalDiffSquared / v.size();
return sqrtf(variance);
}
/*-----------------------------------------------------------------
Utility to write any text file to console
-----------------------------------------------------------------*/
bool printTextFile(string path)
{
ifstream file(path.c_str());
if(!file.is_open()) return false;
while (!file.eof()) {
string line;
file >> line;
printf("%s\n",line.c_str());
}
file.close();
return true;
}
/*-----------------------------------------------------------------
Uses a timestamp plus the network parameters to create an id string unique to this run
-----------------------------------------------------------------*/
string getUniqueRunId()
{
string timestamp = toString(time (NULL));
string id = timestamp +
" " + classifier.datasetName;
return id;
}
/*-----------------------------------------------------------------
Write to matching file the parameters used to create this run
-----------------------------------------------------------------*/
void outputRunParameters()
{
string paramFilename = classifier.outputDir + "/" + classifier.uniqueRunId + " Run Parameters.txt";
FILE * file = fopen(paramFilename.c_str(),"w");
fprintf(file,"DATASET_NAME\t\t%s\n",toString(DATASET_NAME).c_str());
fprintf(file,"DT\t\t%f\n",DT);
fprintf(file,"NUM_VR\t\t%d\n",NUM_VR);
fprintf(file,"NUM_FEATURES\t\t%d\n",NUM_FEATURES);
fprintf(file,"NUM_CLASSES\t\t%d\n",NUM_CLASSES);
fprintf(file,"NETWORK_SCALE\t\t%d\n",NETWORK_SCALE);
fprintf(file,"CLUST_SIZE_RN\t\t%d\n",CLUST_SIZE_RN);
fprintf(file,"CLUST_SIZE_PN\t\t%d\n",CLUST_SIZE_PN);
fprintf(file,"CLUST_SIZE_AN\t\t%d\n",CLUST_SIZE_AN);
fprintf(file,"SYNAPSE_TAU_RNPN\t\t%f\n",SYNAPSE_TAU_RNPN);
fprintf(file,"SYNAPSE_TAU_PNPN\t\t%f\n",SYNAPSE_TAU_PNPN);
fprintf(file,"SYNAPSE_TAU_PNAN\t\t%f\n",SYNAPSE_TAU_PNAN);
fprintf(file,"SYNAPSE_TAU_ANAN\t\t%f\n",SYNAPSE_TAU_ANAN);
fprintf(file,"MAX_FIRING_RATE_HZ\t\t%d\n",MAX_FIRING_RATE_HZ);
fprintf(file,"MIN_FIRING_RATE_HZ\t\t%d\n",MIN_FIRING_RATE_HZ);
fprintf(file,"GLOBAL_WEIGHT_SCALING\t\t%f\n",GLOBAL_WEIGHT_SCALING);
fprintf(file,"WEIGHT_RN_PN\t\t%f\n",WEIGHT_RN_PN);
fprintf(file,"WEIGHT_WTA_PN_PN\t\t%f\n",WEIGHT_WTA_PN_PN);
fprintf(file,"WEIGHT_WTA_AN_AN\t\t%f\n",WEIGHT_WTA_AN_AN);
fprintf(file,"CONNECTIVITY_RN_PN\t\t%f\n",CONNECTIVITY_RN_PN);
fprintf(file,"CONNECTIVITY_PN_PN\t\t%f\n",CONNECTIVITY_PN_PN);
fprintf(file,"CONNECTIVITY_AN_AN\t\t%f\n",CONNECTIVITY_AN_AN);
fprintf(file,"CONNECTIVITY_PN_AN\t\t%f\n",CONNECTIVITY_PN_AN);
fprintf(file,"MIN_WEIGHT_PN_AN\t\t%f\n",MIN_WEIGHT_PN_AN);
fprintf(file,"MAX_WEIGHT_PN_AN\t\t%f\n",MAX_WEIGHT_PN_AN);
fprintf(file,"WEIGHT_DELTA_PN_AN\t\t%f\n",WEIGHT_DELTA_PN_AN);
fprintf(file,"PLASTICITY_INTERVAL_MS\t\t%u\n",PLASTICITY_INTERVAL_MS);
fprintf(file,"SPIKING_ACTIVITY_THRESHOLD_HZ\t\t%d\n",SPIKING_ACTIVITY_THRESHOLD_HZ);
fprintf(file,"TOTAL_RECORDINGS\t\t%d\n",TOTAL_RECORDINGS);
fprintf(file,"N_FOLDING\t\t%d\n",N_FOLDING);
fprintf(file,"RECORDING_TIME_MS\t\t%d\n",RECORDING_TIME_MS);
fclose(file);
}
/*-----------------------------------------------------------------
Load the specified recording and apply it to the classifier as a set of input rates
-----------------------------------------------------------------*/
bool applyInputToClassifier(UINT recordingIdx,bool usePlasticity)
{
//printf("Presenting recording %u to classifier.. \n",recordingIdx);
classifier.resetOverallWinner();
//printf( "Loading data recording %d ..\n", recordingIdx);
//get the set of input rate data for the recording (this will be first generated from the sensor data and the VR set, then cached in a uniquely named file)
classifier.generate_or_load_inputrates_dataset(recordingIdx);
//move the rates data across to the device
classifier.update_input_data_on_device();
//get the correct class label for this recording and store
classifier.setCorrectClass(recordingIdx);
//run the model for the duration of the recording, collecting the relevant spike sets on each timestep (if raster plot specified in FLAGS )
string filename_rasterPlot = classifier.datasetName + " " + classifier.uniqueRunId + " Recording-" + toString(recordingIdx) + " Class-" + toString(classifier.correctClass) + " Raster plot data.txt";
/*
hipEvent_t start, stop;
float time;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord( start, 0 );
*/
classifier.run(RECORDING_TIME_MS,filename_rasterPlot,usePlasticity);
/*
hipEventRecord( stop, 0 );
hipEventSynchronize( stop );
hipEventElapsedTime( &time, start, stop );
hipEventDestroy( start );
hipEventDestroy( stop );
printf("Run method completed in %f\n", time);
*/
int winner = classifier.calculateOverallWinner();
bool classifiedCorrectly = winner == classifier.correctClass;
string yesNo = classifiedCorrectly ? "YES" : "NO";
//cout << "Classified Correctly? " << yesNo << endl;
return classifiedCorrectly;
}
/*--------------------------------------------------------------------------
Utility function to determine if a passed vector of ints contains the specified value
-------------------------------------------------------------------------- */
bool vectorContains(vector<int> &vec ,int lookingFor)
{
vector<int>::iterator it = find(vec.begin(), vec.end(), lookingFor);
return (it != vec.end());
}
/*--------------------------------------------------------------------------
assign default values to the main classifier parameters
-------------------------------------------------------------------------- */
void setDefaultParamValues()
{
classifier.param_SPIKING_ACTIVITY_THRESHOLD_HZ = SPIKING_ACTIVITY_THRESHOLD_HZ;
classifier.param_MAX_FIRING_RATE_HZ = MAX_FIRING_RATE_HZ;
classifier.param_MIN_FIRING_RATE_HZ = MIN_FIRING_RATE_HZ;
classifier.param_GLOBAL_WEIGHT_SCALING = GLOBAL_WEIGHT_SCALING;
classifier.param_WEIGHT_RN_PN = WEIGHT_RN_PN;
classifier.param_CONNECTIVITY_RN_PN = CONNECTIVITY_RN_PN;
classifier.param_WEIGHT_WTA_PN_PN = WEIGHT_WTA_PN_PN;
classifier.param_WEIGHT_WTA_AN_AN = WEIGHT_WTA_AN_AN;
classifier.param_CONNECTIVITY_PN_PN = CONNECTIVITY_PN_PN;
classifier.param_CONNECTIVITY_AN_AN = CONNECTIVITY_AN_AN;
classifier.param_CONNECTIVITY_PN_AN = CONNECTIVITY_PN_AN;
classifier.param_MIN_WEIGHT_PN_AN = MIN_WEIGHT_PN_AN;
classifier.param_MAX_WEIGHT_PN_AN = MAX_WEIGHT_PN_AN;
classifier.param_WEIGHT_DELTA_PN_AN = WEIGHT_DELTA_PN_AN;
classifier.param_PLASTICITY_INTERVAL_MS = PLASTICITY_INTERVAL_MS;
}
/*--------------------------------------------------------------------------
This function is the entry point for running the experiment
-------------------------------------------------------------------------- */
int main(int argc, char *argv[])
{
if (argc < 2)
{
fprintf(stderr, "usage: experiment <output-dir> \n");
return 1;
}
string basename = toString(argv[1]);
//-----------------------------------------------------------------
//NETWORK INITIALISATION
printf( "Network initialisation commenced..\n");
#ifdef FLAG_RUN_ON_CPU
printf("Simulation will be run on Host CPU\n");
#else
printf("Simulation will be run on Device GPU\n");
#endif
classifier.resetDevice(); //clear out any possible memory leaks etc from previous runs
//set up file locations
classifier.datasetName = DATASET_NAME;
classifier.recordingsDir = basename + "/" + RECORDINGS_DIR;
classifier.cacheDir = basename + "/" + CACHE_DIR;
createDirectory(classifier.cacheDir);
classifier.outputDir = basename + "/" + OUTPUT_DIR;
createDirectory(classifier.outputDir);
classifier.uniqueRunId = getUniqueRunId();
classifier.startLog();
printf( "Recordings input directory set to %s\n", classifier.recordingsDir.c_str());
printf( "Cache directory set to %s\n", classifier.cacheDir.c_str());
printf( "Output directory set to %s\n", classifier.outputDir.c_str());
//Uncomment and edit fn to generate simulated timeseries data
//classifier.generateSimulatedTimeSeriesData();
//exit(1);
//assign default values to the main classifier parameters
setDefaultParamValues();
//allocate the memory arrays used in the network on the host and device
classifier.allocateHostAndDeviceMemory();
//seed the random number generator for creating random connections
srand(time(NULL));
//srand(222); //TODO reset
//initialise the set of weights for the SPARSE 1:1 subcluster-subcluster synapses RN-PN (GeNN has no automatic function for what we need)
classifier.initialiseWeights_SPARSE_RN_PN();
//initialise the set of weights for the DENSE subcluster-subcluster WTA synapses PN-PN (GeNN has no automatic function for what we need)
classifier.initialiseWeights_WTA_PN_PN();
//NB: This is now called at the start of each folding trial (see main method) to reset the plastic weights
//initialise the set of weights for the DENSE plastic synapses PN-AN (GeNN has no automatic function for what we need)
//classifier.initialiseWeights_DENSE_PN_AN();
//initialise the set of weights for the DENSE subcluster-subcluster WTA synapses AN-AN (GeNN has no automatic function for what we need)
classifier.initialiseWeights_WTA_AN_AN();
//load set of virtual receptor points VR to be used to generate input levels
classifier.load_VR_data();
//allocate storage on CPU and GPU for the dataset of input rates to the poisson neurons
classifier.initialiseInputData();
//load classes labelling the recording data sets
classifier.loadClassLabels();
// Finally, move all the data arrays initialised across to the device
classifier.populateDeviceMemory();
//re-seed the random number generator after device setup (which used srand in its own way)
srand(time(NULL));
//srand(222); //TODO reset
outputRunParameters();
printf( "Network initialisation completed.\n");
//-----------------------------------------------------------------
//SET UP TRAINING AND TESTING DATASETS
//define the indexes of the test set
unsigned int sizeTestingSet = TOTAL_RECORDINGS / N_FOLDING ; //e.g. 100 recordings into 5 = 20 recordings per bucket
//set up a list of all the recording id's shuffled into a random order, this allows a simple linear split of the training and test data to achieve cross validation
vector<int> shuffledRecordings;
for (int i=0; i<TOTAL_RECORDINGS; i++ ) {//enter all recordings in order
shuffledRecordings.push_back(i);
}
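// With the example figures above (100 recordings, 5 foldings, 20 per bucket),
// folding 0 tests shuffled positions 0..19, folding 1 tests 20..39, and so on,
// while the remaining 80 shuffled recordings form that folding's training set.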
//printf("Re-enable random shuffle!\n");//TODO reset
random_shuffle(shuffledRecordings.begin(),shuffledRecordings.end());
//re-seed the random number generator after shuffle fn (which may have used srand in its own way)
srand(time(NULL));
//srand(222);//TODO reset
//-----------------------------------------------------------------
//set up parameter exploration , if any
//-----------------------------------------------------------------
//string paramName = "SpkActivityThresholdHz";
//int paramValues[] {3,4,6};
//string paramName = "WeightDeltaPNAN";
//float paramValues[] {0.005,0.01,0.025,0.05,0.1,0.2,0.4};
string paramName = "PLASTICITY_INTERVAL_MS";
//float paramValues[] {50,100,200,330,500,1000};
float paramValues[]= {330};
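// To sweep a different hyperparameter, change paramName and paramValues here and
// assign paramValues[paramIndex] to the matching classifier.param_* field at the
// top of the loop below (the commented-out lines there show the pattern).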
//-----------------------------------------------------------------
//track the overall performance of the classifier
string overallResultsFilename = classifier.outputDir + "/" + classifier.uniqueRunId + " Overall Results for varying " + paramName + ".txt";
FILE * overallResultsFile = fopen(overallResultsFilename.c_str(),"w");
fprintf(overallResultsFile,"%s,AvgPercentScore,StdDev\n",paramName.c_str());
//Run full cross validation, stepping through parameter values supplied
for (int paramIndex = 0; paramIndex < sizeof(paramValues)/sizeof(paramValues[0]); paramIndex++) {
//Apply next param value
Parameter param = {paramName,toString(paramValues[paramIndex])};
//classifier.param_SPIKING_ACTIVITY_THRESHOLD_HZ = paramValues[paramIndex];
//classifier.param_WEIGHT_DELTA_PN_AN = paramValues[paramIndex];
classifier.param_PLASTICITY_INTERVAL_MS = paramValues[paramIndex];
//track the performance across each param setting, per folding
string perParamResultsFilename = classifier.outputDir + "/" + classifier.uniqueRunId + " Totalled Results for " + param.name + "-" + param.value + ".txt";
FILE * perParamResultsFile = fopen(perParamResultsFilename.c_str(),"w");
fprintf(perParamResultsFile,"%s,Folding,Stage,Correct,OutOf,Percent\n",param.name.c_str());
//track the detailed performance of the classifier
string individualResultsFilename = classifier.outputDir + "/" + classifier.uniqueRunId + " Individual Results for " + param.name + "-" + param.value + ".txt";
FILE * individualResultsFile = fopen(individualResultsFilename.c_str(),"w");
fprintf(individualResultsFile,"%s,folding,recordingIdx,classifierSelectedClass,correctClass\n",param.name.c_str());
printf("Individual training results will be saved to the file: %s\n", individualResultsFilename.c_str());
int totalTestScore = 0;
int totalTestCount = 0;
vector<float> vecFoldingResults;//holder for the result of each folding, will be averaged/stdDev at the end of cross validation
for (int folding = 0; folding < N_FOLDING; folding++) {
unsigned int firstTestingSet = folding * sizeTestingSet;
unsigned int lastTestingSet = firstTestingSet + sizeTestingSet -1;
//reset the weights for the plastic synapses PN-AN to a random selection
classifier.initialiseWeights_DENSE_PN_AN();
//update on the device
classifier.updateWeights_PN_AN_on_device();
//-----------------------------------------------------------------
//RUN TRAINING
string stage = "training";
printf( "%s %s, folding %u, %s stage commenced..\n",param.name.c_str(),param.value.c_str(),folding,stage.c_str());
UINT trainingCount = 0;
UINT trainingScore = 0;
//Repeat the training set X times, for more exposure to early observations
for (int rpt = 0; rpt < REPEAT_LEARNING_SET ; rpt++) {
timer.startTimer();
//for each recording in the training set
for (int i=0; i<TOTAL_RECORDINGS; i++ ) {
//leave out nominated test data set, only use training data set
if (i<firstTestingSet || i>lastTestingSet)
{
UINT recordingIdx = shuffledRecordings[i];
bool usePlasticity = true;
bool classifiedCorrectly = applyInputToClassifier(recordingIdx,usePlasticity);
//write classifier decision to file alongside correct class
fprintf(individualResultsFile,"%s,%u,%s,%u,%u,%u\n", param.value.c_str(), folding, stage.c_str(), recordingIdx , classifier.winningClass, classifier.correctClass);
trainingCount++;
if (classifiedCorrectly) trainingScore++;
}
}//end of recordings
timer.stopTimer();
printf( "Presented %u recordings for training in time:%f\n",TOTAL_RECORDINGS-sizeTestingSet, timer.getElapsedTime());
} //end repeats
float trainingPercent =100*((float)trainingScore)/((float)trainingCount);
printf( "%s=%s, Folding %u: Classifier training completed. Score:%u/%u (%f percent) \n",param.name.c_str(),param.value.c_str(),folding,trainingScore,trainingCount,trainingPercent);
fprintf(perParamResultsFile,"%s,%u,%s,%u,%u,%f\n",param.value.c_str(),folding,stage.c_str(),trainingScore,trainingCount,trainingPercent);
//-----------------------------------------------------------------
//TESTING
stage = "testing";
printf( "%s %s, folding %u, %s stage commenced..\n",param.name.c_str(),param.value.c_str(),folding,stage.c_str());
UINT testCount = 0;
UINT testScore = 0;
printf( "Classifier testing commenced..\n");
//for each recording in the test set
for (int i=firstTestingSet; i<=lastTestingSet; i++ )
{
int recordingIdx = shuffledRecordings[i];
bool classifiedCorrectly = applyInputToClassifier(recordingIdx,false);//no plasticity
//write classifier decision to file alongside correct class
fprintf(individualResultsFile,"%s,%u,%s,%u,%u,%u\n", param.value.c_str(), folding, stage.c_str(), recordingIdx , classifier.winningClass, classifier.correctClass);
testCount++;
if (classifiedCorrectly) testScore++;
}
float testPercent = 100*((float)testScore)/((float)testCount);
printf( "Folding %u: Classifier Testing completed. Score:%u/%u (%f percent) \n",folding, testScore,testCount,testPercent);
fprintf(perParamResultsFile,"%s,%u,%s,%u,%u,%f\n",param.value.c_str(), folding,stage.c_str(),testScore,testCount,testPercent);
//save the score for this folding for later processing (we will want the std dev)
vecFoldingResults.push_back(testPercent);
totalTestScore += testScore;
totalTestCount += testCount;
} //goto next folding
//end of foldings
fclose(perParamResultsFile);
fclose(individualResultsFile);
//analyze per param results
float avg = getAverage(vecFoldingResults);
float stdDev = getStdDev(vecFoldingResults,avg);
float totalTestPercent = 100*((float)totalTestScore)/((float)totalTestCount);
printf( "Classifier Training/Testing completed for %s=%s. Total Score:%u/%u (%f percent) \n", param.name.c_str(),param.value.c_str(),totalTestScore,totalTestCount,totalTestPercent);
fprintf(overallResultsFile,"%s,%f,%f\n",param.value.c_str(),avg,stdDev);
} //goto next param value
//end of param exploration
fclose(overallResultsFile);
//shut down device before classifier instance destroyed
//classifier.clearDownDevice();
//This is done in the classifier destructor which also clears CPU memory etc
printTextFile(overallResultsFilename);
printf( "End of Run %s\n", classifier.uniqueRunId.c_str());
return 0;
}//END OF MAIN
| ec5963aca3ed05134da5da3d3c14f93e250fef6f.cu | #define RAND(Y,X) Y = Y * 1103515245 +12345;X= (unsigned int)(Y >> 16) & 32767
/*--------------------------------------------------------------------------
Author: Alan Diamond
--------------------------------------------------------------------------
Main entry point for the experiment using the classifier design based on Schmuker 2014 hardware classifier
--------------------------------------------------------------------------*/
#include "experiment.h"
#include <time.h>
#include <algorithm> //for std:find
//for stat command (file info interrogation)
#include <sys/types.h>
#include <sys/stat.h>
#ifndef S_ISDIR
#define S_ISDIR(mode) (((mode) & S_IFMT) == S_IFDIR)
#endif
//this class does most of the work, controlled by this experiment
Schmuker2014_classifier classifier;
//generic parameter struct to allow generic testing and reporting of different parameter settings
typedef struct Parameter {
string name;
string value;
} Parameter;
/*-----------------------------------------------------------------
Utility to determine if named directory exists
-----------------------------------------------------------------*/
bool directoryExists(string const &path) {
struct stat st;
if (stat(path.c_str(), &st) == -1) {
return false;
}
return S_ISDIR(st.st_mode);
}
/*-----------------------------------------------------------------
Utility to create a directory on Linux if not existing
-----------------------------------------------------------------*/
bool createDirectory(string path) {
if (directoryExists(path)) {
cout << "INFO: instructed to create directory " << path << " that already exists. Ignoring.." << endl;
return false;
} else {
string cmd = "mkdir '" + path + "'";
return system(cmd.c_str());
}
}
/*-----------------------------------------------------------------
Utilities to get the average and stdDev from a vector of floats
-----------------------------------------------------------------*/
float getAverage(vector<float> &v)
{
float total = 0.0f;
for (vector<float>::iterator it = v.begin(); it != v.end(); ++it)
total += *it;
return total / v.size();
}
float getStdDev(vector<float> &v, float avg)
{
float totalDiffSquared = 0.0f;
for (vector<float>::iterator it = v.begin(); it != v.end(); ++it) {
float diff = (avg - *it);
totalDiffSquared += diff*diff;
}
float variance = totalDiffSquared / v.size();
return sqrtf(variance);
}
/*-----------------------------------------------------------------
Utility to write any text file to console
-----------------------------------------------------------------*/
bool printTextFile(string path)
{
ifstream file(path.c_str());
if(!file.is_open()) return false;
while (!file.eof()) {
string line;
file >> line;
printf("%s\n",line.c_str());
}
file.close();
return true;
}
/*-----------------------------------------------------------------
Uses a timestamp plus the network parameters to create an id string unique to this run
-----------------------------------------------------------------*/
string getUniqueRunId()
{
string timestamp = toString(time (NULL));
string id = timestamp +
" " + classifier.datasetName;
return id;
}
/*-----------------------------------------------------------------
Write to matching file the parameters used to create this run
-----------------------------------------------------------------*/
void outputRunParameters()
{
string paramFilename = classifier.outputDir + "/" + classifier.uniqueRunId + " Run Parameters.txt";
FILE * file = fopen(paramFilename.c_str(),"w");
fprintf(file,"DATASET_NAME\t\t%s\n",toString(DATASET_NAME).c_str());
fprintf(file,"DT\t\t%f\n",DT);
fprintf(file,"NUM_VR\t\t%d\n",NUM_VR);
fprintf(file,"NUM_FEATURES\t\t%d\n",NUM_FEATURES);
fprintf(file,"NUM_CLASSES\t\t%d\n",NUM_CLASSES);
fprintf(file,"NETWORK_SCALE\t\t%d\n",NETWORK_SCALE);
fprintf(file,"CLUST_SIZE_RN\t\t%d\n",CLUST_SIZE_RN);
fprintf(file,"CLUST_SIZE_PN\t\t%d\n",CLUST_SIZE_PN);
fprintf(file,"CLUST_SIZE_AN\t\t%d\n",CLUST_SIZE_AN);
fprintf(file,"SYNAPSE_TAU_RNPN\t\t%f\n",SYNAPSE_TAU_RNPN);
fprintf(file,"SYNAPSE_TAU_PNPN\t\t%f\n",SYNAPSE_TAU_PNPN);
fprintf(file,"SYNAPSE_TAU_PNAN\t\t%f\n",SYNAPSE_TAU_PNAN);
fprintf(file,"SYNAPSE_TAU_ANAN\t\t%f\n",SYNAPSE_TAU_ANAN);
fprintf(file,"MAX_FIRING_RATE_HZ\t\t%d\n",MAX_FIRING_RATE_HZ);
fprintf(file,"MIN_FIRING_RATE_HZ\t\t%d\n",MIN_FIRING_RATE_HZ);
fprintf(file,"GLOBAL_WEIGHT_SCALING\t\t%f\n",GLOBAL_WEIGHT_SCALING);
fprintf(file,"WEIGHT_RN_PN\t\t%f\n",WEIGHT_RN_PN);
fprintf(file,"WEIGHT_WTA_PN_PN\t\t%f\n",WEIGHT_WTA_PN_PN);
fprintf(file,"WEIGHT_WTA_AN_AN\t\t%f\n",WEIGHT_WTA_AN_AN);
fprintf(file,"CONNECTIVITY_RN_PN\t\t%f\n",CONNECTIVITY_RN_PN);
fprintf(file,"CONNECTIVITY_PN_PN\t\t%f\n",CONNECTIVITY_PN_PN);
fprintf(file,"CONNECTIVITY_AN_AN\t\t%f\n",CONNECTIVITY_AN_AN);
fprintf(file,"CONNECTIVITY_PN_AN\t\t%f\n",CONNECTIVITY_PN_AN);
fprintf(file,"MIN_WEIGHT_PN_AN\t\t%f\n",MIN_WEIGHT_PN_AN);
fprintf(file,"MAX_WEIGHT_PN_AN\t\t%f\n",MAX_WEIGHT_PN_AN);
fprintf(file,"WEIGHT_DELTA_PN_AN\t\t%f\n",WEIGHT_DELTA_PN_AN);
fprintf(file,"PLASTICITY_INTERVAL_MS\t\t%u\n",PLASTICITY_INTERVAL_MS);
fprintf(file,"SPIKING_ACTIVITY_THRESHOLD_HZ\t\t%d\n",SPIKING_ACTIVITY_THRESHOLD_HZ);
fprintf(file,"TOTAL_RECORDINGS\t\t%d\n",TOTAL_RECORDINGS);
fprintf(file,"N_FOLDING\t\t%d\n",N_FOLDING);
fprintf(file,"RECORDING_TIME_MS\t\t%d\n",RECORDING_TIME_MS);
fclose(file);
}
/*-----------------------------------------------------------------
Load the specified recording and apply it to the classifier as a set of input rates
-----------------------------------------------------------------*/
bool applyInputToClassifier(UINT recordingIdx,bool usePlasticity)
{
//printf("Presenting recording %u to classifier.. \n",recordingIdx);
classifier.resetOverallWinner();
//printf( "Loading data recording %d ..\n", recordingIdx);
//get the set of input rate data for the recording (this will be first generated from the sensor data and the VR set, then cached in a uniquely named file)
classifier.generate_or_load_inputrates_dataset(recordingIdx);
//move the rates data across to the device
classifier.update_input_data_on_device();
//get the correct class label for this recording and store
classifier.setCorrectClass(recordingIdx);
//run the model for the duration of the recording, collecting the relevant spike sets on each timestep (if raster plot specified in FLAGS )
string filename_rasterPlot = classifier.datasetName + " " + classifier.uniqueRunId + " Recording-" + toString(recordingIdx) + " Class-" + toString(classifier.correctClass) + " Raster plot data.txt";
/*
cudaEvent_t start, stop;
float time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord( start, 0 );
*/
classifier.run(RECORDING_TIME_MS,filename_rasterPlot,usePlasticity);
/*
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
cudaEventElapsedTime( &time, start, stop );
cudaEventDestroy( start );
cudaEventDestroy( stop );
printf("Run method completed in %f\n", time);
*/
int winner = classifier.calculateOverallWinner();
bool classifiedCorrectly = winner == classifier.correctClass;
string yesNo = classifiedCorrectly ? "YES" : "NO";
//cout << "Classified Correctly? " << yesNo << endl;
return classifiedCorrectly;
}
/*--------------------------------------------------------------------------
Utility function to determine if a passed vector of ints contains the specified value
-------------------------------------------------------------------------- */
bool vectorContains(vector<int> &vec ,int lookingFor)
{
vector<int>::iterator it = find(vec.begin(), vec.end(), lookingFor);
return (it != vec.end());
}
/*--------------------------------------------------------------------------
assign default values to the main classifier parameters
-------------------------------------------------------------------------- */
void setDefaultParamValues()
{
classifier.param_SPIKING_ACTIVITY_THRESHOLD_HZ = SPIKING_ACTIVITY_THRESHOLD_HZ;
classifier.param_MAX_FIRING_RATE_HZ = MAX_FIRING_RATE_HZ;
classifier.param_MIN_FIRING_RATE_HZ = MIN_FIRING_RATE_HZ;
classifier.param_GLOBAL_WEIGHT_SCALING = GLOBAL_WEIGHT_SCALING;
classifier.param_WEIGHT_RN_PN = WEIGHT_RN_PN;
classifier.param_CONNECTIVITY_RN_PN = CONNECTIVITY_RN_PN;
classifier.param_WEIGHT_WTA_PN_PN = WEIGHT_WTA_PN_PN;
classifier.param_WEIGHT_WTA_AN_AN = WEIGHT_WTA_AN_AN;
classifier.param_CONNECTIVITY_PN_PN = CONNECTIVITY_PN_PN;
classifier.param_CONNECTIVITY_AN_AN = CONNECTIVITY_AN_AN;
classifier.param_CONNECTIVITY_PN_AN = CONNECTIVITY_PN_AN;
classifier.param_MIN_WEIGHT_PN_AN = MIN_WEIGHT_PN_AN;
classifier.param_MAX_WEIGHT_PN_AN = MAX_WEIGHT_PN_AN;
classifier.param_WEIGHT_DELTA_PN_AN = WEIGHT_DELTA_PN_AN;
classifier.param_PLASTICITY_INTERVAL_MS = PLASTICITY_INTERVAL_MS;
}
/*--------------------------------------------------------------------------
This function is the entry point for running the experiment
-------------------------------------------------------------------------- */
int main(int argc, char *argv[])
{
if (argc < 2)
{
fprintf(stderr, "usage: experiment <output-dir> \n");
return 1;
}
string basename = toString(argv[1]);
//-----------------------------------------------------------------
//NETWORK INITIALISATION
printf( "Network initialisation commenced..\n");
#ifdef FLAG_RUN_ON_CPU
printf("Simulation will be run on Host CPU\n");
#else
printf("Simulation will be run on Device GPU\n");
#endif
classifier.resetDevice(); //clear out any possible memory leaks etc from previous runs
//set up file locations
classifier.datasetName = DATASET_NAME;
classifier.recordingsDir = basename + "/" + RECORDINGS_DIR;
classifier.cacheDir = basename + "/" + CACHE_DIR;
createDirectory(classifier.cacheDir);
classifier.outputDir = basename + "/" + OUTPUT_DIR;
createDirectory(classifier.outputDir);
classifier.uniqueRunId = getUniqueRunId();
classifier.startLog();
printf( "Recordings input directory set to %s\n", classifier.recordingsDir.c_str());
printf( "Cache directory set to %s\n", classifier.cacheDir.c_str());
printf( "Output directory set to %s\n", classifier.outputDir.c_str());
//Uncomment and edit fn to generate simulated timeseries data
//classifier.generateSimulatedTimeSeriesData();
//exit(1);
//assign default values to the main classifier parameters
setDefaultParamValues();
//allocate the memory arrays used in the network on the host and device
classifier.allocateHostAndDeviceMemory();
//seed the random number generator for creating random connections
srand(time(NULL));
//srand(222); //TODO reset
//initialise the set of weights for the SPARSE 1:1 subcluster-subcluster synapses RN-PN (GeNN has no automatic function for what we need)
classifier.initialiseWeights_SPARSE_RN_PN();
//initialise the set of weights for the DENSE subcluster-subcluster WTA synapses PN-PN (GeNN has no automatic function for what we need)
classifier.initialiseWeights_WTA_PN_PN();
//NB: This is now called at the start of each folding trial (see main method) to reset the plastic weights
//initialise the set of weights for the DENSE plastic synapses PN-AN (GeNN has no automatic function for what we need)
//classifier.initialiseWeights_DENSE_PN_AN();
//initialise the set of weights for the DENSE subcluster-subcluster WTA synapses AN-AN (GeNN has no automatic function for what we need)
classifier.initialiseWeights_WTA_AN_AN();
//load set of virtual receptor points VR to be used to generate input levels
classifier.load_VR_data();
//allocate storage on CPU and GPU for the dataset of input rates to the poisson neurons
classifier.initialiseInputData();
//load classes labelling the recording data sets
classifier.loadClassLabels();
// Finally, move all the data arrays initialised across to the device
classifier.populateDeviceMemory();
//re-seed the random number generator after device setup (which used srand in its own way)
srand(time(NULL));
//srand(222); //TODO reset
outputRunParameters();
printf( "Network initialisation completed.\n");
//-----------------------------------------------------------------
//SET UP TRAINING AND TESTING DATASETS
//define the indexes of the test set
unsigned int sizeTestingSet = TOTAL_RECORDINGS / N_FOLDING ; //e.g. 100 recordings into 5 = 20 recordings per bucket
//set up a list of all the recording id's shuffled into a random order, this allows a simple linear split of the training and test data to achieve cross validation
vector<int> shuffledRecordings;
for (int i=0; i<TOTAL_RECORDINGS; i++ ) {//enter all recordings in order
shuffledRecordings.push_back(i);
}
//printf("Re-enable random shuffle!\n");//TODO reset
random_shuffle(shuffledRecordings.begin(),shuffledRecordings.end());
//re-seed the random number generator after shuffle fn (which may have used srand in its own way)
srand(time(NULL));
//srand(222);//TODO reset
//-----------------------------------------------------------------
//set up parameter exploration , if any
//-----------------------------------------------------------------
//string paramName = "SpkActivityThresholdHz";
//int paramValues[] {3,4,6};
//string paramName = "WeightDeltaPNAN";
//float paramValues[] {0.005,0.01,0.025,0.05,0.1,0.2,0.4};
string paramName = "PLASTICITY_INTERVAL_MS";
//float paramValues[] {50,100,200,330,500,1000};
float paramValues[]= {330};
//-----------------------------------------------------------------
//track the overall performance of the classifier
string overallResultsFilename = classifier.outputDir + "/" + classifier.uniqueRunId + " Overall Results for varying " + paramName + ".txt";
FILE * overallResultsFile = fopen(overallResultsFilename.c_str(),"w");
fprintf(overallResultsFile,"%s,AvgPercentScore,StdDev\n",paramName.c_str());
//Run full cross validation, stepping through parameter values supplied
for (int paramIndex = 0; paramIndex < sizeof(paramValues)/sizeof(paramValues[0]); paramIndex++) {
//Apply next param value
Parameter param = {paramName,toString(paramValues[paramIndex])};
//classifier.param_SPIKING_ACTIVITY_THRESHOLD_HZ = paramValues[paramIndex];
//classifier.param_WEIGHT_DELTA_PN_AN = paramValues[paramIndex];
classifier.param_PLASTICITY_INTERVAL_MS = paramValues[paramIndex];
//track the performance across each param setting, per folding
string perParamResultsFilename = classifier.outputDir + "/" + classifier.uniqueRunId + " Totalled Results for " + param.name + "-" + param.value + ".txt";
FILE * perParamResultsFile = fopen(perParamResultsFilename.c_str(),"w");
fprintf(perParamResultsFile,"%s,Folding,Stage,Correct,OutOf,Percent\n",param.name.c_str());
//track the detailed performance of the classifier
string individualResultsFilename = classifier.outputDir + "/" + classifier.uniqueRunId + " Individual Results for " + param.name + "-" + param.value + ".txt";
FILE * individualResultsFile = fopen(individualResultsFilename.c_str(),"w");
fprintf(individualResultsFile,"%s,folding,recordingIdx,classifierSelectedClass,correctClass\n",param.name.c_str());
printf("Individual training results will be saved to the file: %s\n", individualResultsFilename.c_str());
int totalTestScore = 0;
int totalTestCount = 0;
vector<float> vecFoldingResults;//holder for the result of each folding, will be averaged/stdDev at the end of cross validation
for (int folding = 0; folding < N_FOLDING; folding++) {
unsigned int firstTestingSet = folding * sizeTestingSet;
unsigned int lastTestingSet = firstTestingSet + sizeTestingSet -1;
//reset the weights for the plastic synapses PN-AN to a random selection
classifier.initialiseWeights_DENSE_PN_AN();
//update on the device
classifier.updateWeights_PN_AN_on_device();
//-----------------------------------------------------------------
//RUN TRAINING
string stage = "training";
printf( "%s %s, folding %u, %s stage commenced..\n",param.name.c_str(),param.value.c_str(),folding,stage.c_str());
UINT trainingCount = 0;
UINT trainingScore = 0;
//Repeat the training set X times, for more exposure to early observations
for (int rpt = 0; rpt < REPEAT_LEARNING_SET ; rpt++) {
timer.startTimer();
//for each recording in the training set
for (int i=0; i<TOTAL_RECORDINGS; i++ ) {
//leave out nominated test data set, only use training data set
if (i<firstTestingSet || i>lastTestingSet)
{
UINT recordingIdx = shuffledRecordings[i];
bool usePlasticity = true;
bool classifiedCorrectly = applyInputToClassifier(recordingIdx,usePlasticity);
//write classifier decision to file alongside correct class
fprintf(individualResultsFile,"%s,%u,%s,%u,%u,%u\n", param.value.c_str(), folding, stage.c_str(), recordingIdx , classifier.winningClass, classifier.correctClass);
trainingCount++;
if (classifiedCorrectly) trainingScore++;
}
}//end of recordings
timer.stopTimer();
printf( "Presented %u recordings for training in time:%f\n",TOTAL_RECORDINGS-sizeTestingSet, timer.getElapsedTime());
} //end repeats
float trainingPercent =100*((float)trainingScore)/((float)trainingCount);
printf( "%s=%s, Folding %u: Classifier training completed. Score:%u/%u (%f percent) \n",param.name.c_str(),param.value.c_str(),folding,trainingScore,trainingCount,trainingPercent);
fprintf(perParamResultsFile,"%s,%u,%s,%u,%u,%f\n",param.value.c_str(),folding,stage.c_str(),trainingScore,trainingCount,trainingPercent);
//-----------------------------------------------------------------
//TESTING
stage = "testing";
printf( "%s %s, folding %u, %s stage commenced..\n",param.name.c_str(),param.value.c_str(),folding,stage.c_str());
UINT testCount = 0;
UINT testScore = 0;
printf( "Classifier testing commenced..\n");
//for each recording in the test set
for (int i=firstTestingSet; i<=lastTestingSet; i++ )
{
int recordingIdx = shuffledRecordings[i];
bool classifiedCorrectly = applyInputToClassifier(recordingIdx,false);//no plasticity
//write classifier decision to file alongside correct class
fprintf(individualResultsFile,"%s,%u,%s,%u,%u,%u\n", param.value.c_str(), folding, stage.c_str(), recordingIdx , classifier.winningClass, classifier.correctClass);
testCount++;
if (classifiedCorrectly) testScore++;
}
float testPercent = 100*((float)testScore)/((float)testCount);
printf( "Folding %u: Classifier Testing completed. Score:%u/%u (%f percent) \n",folding, testScore,testCount,testPercent);
fprintf(perParamResultsFile,"%s,%u,%s,%u,%u,%f\n",param.value.c_str(), folding,stage.c_str(),testScore,testCount,testPercent);
//save the score for this folding for later processing (we will want the std dev)
vecFoldingResults.push_back(testPercent);
totalTestScore += testScore;
totalTestCount += testCount;
} //goto next folding
//end of foldings
fclose(perParamResultsFile);
fclose(individualResultsFile);
//analyze per param results
float avg = getAverage(vecFoldingResults);
float stdDev = getStdDev(vecFoldingResults,avg);
float totalTestPercent = 100*((float)totalTestScore)/((float)totalTestCount);
printf( "Classifier Training/Testing completed for %s=%s. Total Score:%u/%u (%f percent) \n", param.name.c_str(),param.value.c_str(),totalTestScore,totalTestCount,totalTestPercent);
fprintf(overallResultsFile,"%s,%f,%f\n",param.value.c_str(),avg,stdDev);
} //goto next param value
//end of param exploration
fclose(overallResultsFile);
//shut down device before classifier instance destroyed
//classifier.clearDownDevice();
//This is done in the classifier destructor which also clears CPU memory etc
printTextFile(overallResultsFilename);
printf( "End of Run %s\n", classifier.uniqueRunId.c_str());
return 0;
}//END OF MAIN
|
22a5d0cc720a1cc9727479722c66b99bc9f85c02.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
#define MAX_THREADS 20
#define pi(x) printf("%d\n",x);
#define HANDLE_ERROR(err) ( HandleError( err, __FILE__, __LINE__ ) )
#define th_p_block 256
__global__ void dotPro(long n, float *vec1, float *vec2, float *vec3) {
__shared__ float cache[th_p_block];
unsigned tid = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int cacheIdx = threadIdx.x;
float temp = 0;
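// grid-stride loop: each thread accumulates a partial dot product over elements spaced blockDim.x*gridDim.x apart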
while(tid < n)
{
temp += vec1[tid] * vec2[tid];
tid += blockDim.x * gridDim.x;
}
cache[cacheIdx] = temp;
__syncthreads();
// reduction
unsigned i = blockDim.x/2; // need the num threads to be a power of two (256 is okay)
while( i != 0 ){
if(cacheIdx < i){
cache[cacheIdx] += cache[cacheIdx + i ];
}
__syncthreads(); //sync threads in the current block
// power of two needed here
i = i/2;
}
if(cacheIdx == 0){
vec3[blockIdx.x] = cache[0];
}
// if (tid < n) vec3[i] = vec1[i] * vec2[i];
} | 22a5d0cc720a1cc9727479722c66b99bc9f85c02.cu | #include "includes.h"
#define MAX_THREADS 20
#define pi(x) printf("%d\n",x);
#define HANDLE_ERROR(err) ( HandleError( err, __FILE__, __LINE__ ) )
#define th_p_block 256
__global__ void dotPro(long n, float *vec1, float *vec2, float *vec3) {
__shared__ float cache[th_p_block];
unsigned tid = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int cacheIdx = threadIdx.x;
float temp = 0;
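// grid-stride loop: each thread accumulates a partial dot product over elements spaced blockDim.x*gridDim.x apart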
while(tid < n)
{
temp += vec1[tid] * vec2[tid];
tid += blockDim.x * gridDim.x;
}
cache[cacheIdx] = temp;
__syncthreads();
// reduction
unsigned i = blockDim.x/2; // need the num threads to be a power of two (256 is okay)
while( i != 0 ){
if(cacheIdx < i){
cache[cacheIdx] += cache[cacheIdx + i ];
}
__syncthreads(); //sync threads in the current block
// power of two needed here
i = i/2;
}
if(cacheIdx == 0){
vec3[blockIdx.x] = cache[0];
}
// if (tid < n) vec3[i] = vec1[i] * vec2[i];
} |
4803a2efba06a76f46274a8fb7dba2c8b2b50e15.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include "common.h"
#include "helper_functions.cuh"
__global__
void boxblur_ker_sep_y(int* d_ptr, uchar* d_res, int ker, int oh, int ow)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
int c = blockIdx.z;
int h = oh - (ker - 1);
int w = ow;
if (i >= h || j >= w) {
return;
}
int sum_r = 0;
int kj = j;
for (int ki = i; ki < i+ker; ki++){
int r = d_ptr[3*(ki*ow + kj) + c];
sum_r += r;
}
int ker_size = ker*ker;
d_res[3*(i*w+j) + c] = sum_r/ker_size;
}
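// Horizontal pass of the separable box blur: sums ker neighbouring columns into an int buffer; the division is deferred to the vertical pass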
__global__
void boxblur_ker_sep_x(uchar* d_ptr, int* d_res, int ker, int oh, int ow)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
int c = blockIdx.z;
int h = oh;
int w = ow - (ker - 1);
if (i >= h || j >= w) {
return;
}
int sum_r = 0;
int ki = i;
for (int kj = j; kj < j+ker; kj++){
uchar r = d_ptr[3*(ki*ow + kj) + c];
sum_r += r;
}
// int ker_size = ker;
d_res[3*(i*w+j) + c] = sum_r; ///ker_size;
}
void box_blur_cuda_sep(PPMImage* img, PPMImage* res, int ker){
int h = res->x;
int w = res->y;
int oh = img->x;
int ow = img->y;
int th = oh;
int tw = ow - (ker - 1);
uchar* ptr = (uchar*) img->data;
uchar* dst = (uchar*) res->data;
uchar* d_ptr, *d_res;
int* d_int;
checkCudaErrors(hipMalloc(&d_ptr, oh*ow*3*sizeof(uchar)));
checkCudaErrors(hipMalloc(&d_int, th*tw*3*sizeof(int)));
checkCudaErrors(hipMalloc(&d_res, h*w*3*sizeof(uchar)));
hipEvent_t start_all, stop_all, start_comp, stop_comp;
checkCudaErrors(hipEventCreate(&start_all));
checkCudaErrors(hipEventCreate(&stop_all));
checkCudaErrors(hipEventCreate(&start_comp));
checkCudaErrors(hipEventCreate(&stop_comp));
checkCudaErrors(hipEventRecord(start_all));
checkCudaErrors(hipMemcpy(d_ptr, ptr, oh*ow*3*sizeof(uchar), hipMemcpyHostToDevice));
int cell_size = 32;
int num_blocks_x;
int num_blocks_y;
dim3 block_size;
dim3 grid_size;
checkCudaErrors(hipEventRecord(start_comp));
num_blocks_x = th/cell_size + (th % cell_size != 0);
num_blocks_y = tw/cell_size + (tw % cell_size != 0);
block_size = dim3(cell_size, cell_size);
grid_size = dim3(num_blocks_x, num_blocks_y, 3);
hipLaunchKernelGGL(( boxblur_ker_sep_x), dim3(grid_size), dim3(block_size), 0, 0, d_ptr, d_int, ker, oh, ow);
num_blocks_x = h/cell_size + (h % cell_size != 0);
num_blocks_y = w/cell_size + (w % cell_size != 0);
block_size = dim3(cell_size, cell_size);
grid_size = dim3(num_blocks_x, num_blocks_y, 3);
hipLaunchKernelGGL(( boxblur_ker_sep_y), dim3(grid_size), dim3(block_size), 0, 0, d_int, d_res, ker, th, tw);
checkCudaErrors(hipEventRecord(stop_comp));
checkCudaErrors(hipMemcpy(dst, d_res, h*w*3*sizeof(uchar), hipMemcpyDeviceToHost));
checkCudaErrors(hipEventRecord(stop_all));
checkCudaErrors(hipEventSynchronize(stop_all));
float milliseconds_all=0, milliseconds_comp=0, milliseconds_data=0;
checkCudaErrors(hipEventElapsedTime(&milliseconds_all, start_all, stop_all));
checkCudaErrors(hipEventElapsedTime(&milliseconds_comp, start_comp, stop_comp));
milliseconds_data = milliseconds_all - milliseconds_comp;
std::cout << "GPU KERNEL TIME (microseconds):\t" << (int) (milliseconds_comp*1000) << std::endl;
std::cout << "GPU TOTAL TIME (microseconds):\t" << (int) (milliseconds_all*1000) << std::endl;
std::cout << "GPU DATA TIME (microseconds):\t" << (int) (milliseconds_data*1000) << std::endl;
checkCudaErrors(hipFree(d_ptr));
checkCudaErrors(hipFree(d_res));
}
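// Single-pass (non-separable) box blur: averages the full ker x ker window around each pixel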
__global__
void boxblur_ker(uchar* d_ptr, uchar* d_res, int ker, int oh, int ow)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
int c = blockIdx.z;
int h = oh - (ker - 1);
int w = ow - (ker - 1);
if (i >= h || j >= w) {
return;
}
int sum_r = 0;
for (int ki = i; ki < i+ker; ki++){
for (int kj = j; kj < j+ker; kj++){
uchar r = d_ptr[3*(ki*ow + kj) + c];
sum_r += r;
}
}
int ker_size = ker*ker;
d_res[3*(i*w+j) + c] = sum_r/ker_size;
}
void box_blur_cuda(PPMImage* img, PPMImage* res, int ker){
int h = res->x;
int w = res->y;
int oh = img->x;
int ow = img->y;
uchar* ptr = (uchar*) img->data;
uchar* dst = (uchar*) res->data;
uchar* d_ptr, *d_res;
checkCudaErrors(hipMalloc(&d_ptr, oh*ow*3*sizeof(uchar)));
checkCudaErrors(hipMalloc(&d_res, h*w*3*sizeof(uchar)));
hipEvent_t start_all, stop_all, start_comp, stop_comp;
checkCudaErrors(hipEventCreate(&start_all));
checkCudaErrors(hipEventCreate(&stop_all));
checkCudaErrors(hipEventCreate(&start_comp));
checkCudaErrors(hipEventCreate(&stop_comp));
checkCudaErrors(hipEventRecord(start_all));
checkCudaErrors(hipMemcpy(d_ptr, ptr, oh*ow*3*sizeof(uchar), hipMemcpyHostToDevice));
int cell_size = 32;
int num_blocks_x = h/cell_size + (h % cell_size != 0);
int num_blocks_y = w/cell_size + (w % cell_size != 0);
dim3 block_size = dim3(cell_size, cell_size);
dim3 grid_size = dim3(num_blocks_x, num_blocks_y, 3);
checkCudaErrors(hipEventRecord(start_comp));
hipLaunchKernelGGL(( boxblur_ker), dim3(grid_size), dim3(block_size), 0, 0, d_ptr, d_res, ker, oh, ow);
checkCudaErrors(hipEventRecord(stop_comp));
checkCudaErrors(hipMemcpy(dst, d_res, h*w*3*sizeof(uchar), hipMemcpyDeviceToHost));
checkCudaErrors(hipEventRecord(stop_all));
checkCudaErrors(hipEventSynchronize(stop_all));
float milliseconds_all=0, milliseconds_comp=0, milliseconds_data=0;
checkCudaErrors(hipEventElapsedTime(&milliseconds_all, start_all, stop_all));
checkCudaErrors(hipEventElapsedTime(&milliseconds_comp, start_comp, stop_comp));
milliseconds_data = milliseconds_all - milliseconds_comp;
std::cout << "GPU KERNEL TIME (microseconds):\t" << (int) (milliseconds_comp*1000) << std::endl;
std::cout << "GPU TOTAL TIME (microseconds):\t" << (int) (milliseconds_all*1000) << std::endl;
std::cout << "GPU DATA TIME (microseconds):\t" << (int) (milliseconds_data*1000) << std::endl;
checkCudaErrors(hipFree(d_ptr));
checkCudaErrors(hipFree(d_res));
}
| 4803a2efba06a76f46274a8fb7dba2c8b2b50e15.cu | #include <iostream>
#include "common.h"
#include "helper_functions.cuh"
__global__
void boxblur_ker_sep_y(int* d_ptr, uchar* d_res, int ker, int oh, int ow)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
int c = blockIdx.z;
int h = oh - (ker - 1);
int w = ow;
if (i >= h || j >= w) {
return;
}
int sum_r = 0;
int kj = j;
for (int ki = i; ki < i+ker; ki++){
int r = d_ptr[3*(ki*ow + kj) + c];
sum_r += r;
}
int ker_size = ker*ker;
d_res[3*(i*w+j) + c] = sum_r/ker_size;
}
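// Horizontal pass of the separable box blur: sums ker neighbouring columns into an int buffer; the division is deferred to the vertical pass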
__global__
void boxblur_ker_sep_x(uchar* d_ptr, int* d_res, int ker, int oh, int ow)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
int c = blockIdx.z;
int h = oh;
int w = ow - (ker - 1);
if (i >= h || j >= w) {
return;
}
int sum_r = 0;
int ki = i;
for (int kj = j; kj < j+ker; kj++){
uchar r = d_ptr[3*(ki*ow + kj) + c];
sum_r += r;
}
// int ker_size = ker;
d_res[3*(i*w+j) + c] = sum_r; ///ker_size;
}
void box_blur_cuda_sep(PPMImage* img, PPMImage* res, int ker){
int h = res->x;
int w = res->y;
int oh = img->x;
int ow = img->y;
int th = oh;
int tw = ow - (ker - 1);
uchar* ptr = (uchar*) img->data;
uchar* dst = (uchar*) res->data;
uchar* d_ptr, *d_res;
int* d_int;
checkCudaErrors(cudaMalloc(&d_ptr, oh*ow*3*sizeof(uchar)));
checkCudaErrors(cudaMalloc(&d_int, th*tw*3*sizeof(int)));
checkCudaErrors(cudaMalloc(&d_res, h*w*3*sizeof(uchar)));
cudaEvent_t start_all, stop_all, start_comp, stop_comp;
checkCudaErrors(cudaEventCreate(&start_all));
checkCudaErrors(cudaEventCreate(&stop_all));
checkCudaErrors(cudaEventCreate(&start_comp));
checkCudaErrors(cudaEventCreate(&stop_comp));
checkCudaErrors(cudaEventRecord(start_all));
checkCudaErrors(cudaMemcpy(d_ptr, ptr, oh*ow*3*sizeof(uchar), cudaMemcpyHostToDevice));
int cell_size = 32;
int num_blocks_x;
int num_blocks_y;
dim3 block_size;
dim3 grid_size;
checkCudaErrors(cudaEventRecord(start_comp));
num_blocks_x = th/cell_size + (th % cell_size != 0);
num_blocks_y = tw/cell_size + (tw % cell_size != 0);
block_size = dim3(cell_size, cell_size);
grid_size = dim3(num_blocks_x, num_blocks_y, 3);
boxblur_ker_sep_x<<<grid_size, block_size>>>(d_ptr, d_int, ker, oh, ow);
num_blocks_x = h/cell_size + (h % cell_size != 0);
num_blocks_y = w/cell_size + (w % cell_size != 0);
block_size = dim3(cell_size, cell_size);
grid_size = dim3(num_blocks_x, num_blocks_y, 3);
boxblur_ker_sep_y<<<grid_size, block_size>>>(d_int, d_res, ker, th, tw);
checkCudaErrors(cudaEventRecord(stop_comp));
checkCudaErrors(cudaMemcpy(dst, d_res, h*w*3*sizeof(uchar), cudaMemcpyDeviceToHost));
checkCudaErrors(cudaEventRecord(stop_all));
checkCudaErrors(cudaEventSynchronize(stop_all));
float milliseconds_all=0, milliseconds_comp=0, milliseconds_data=0;
checkCudaErrors(cudaEventElapsedTime(&milliseconds_all, start_all, stop_all));
checkCudaErrors(cudaEventElapsedTime(&milliseconds_comp, start_comp, stop_comp));
milliseconds_data = milliseconds_all - milliseconds_comp;
std::cout << "GPU KERNEL TIME (microseconds):\t" << (int) (milliseconds_comp*1000) << std::endl;
std::cout << "GPU TOTAL TIME (microseconds):\t" << (int) (milliseconds_all*1000) << std::endl;
std::cout << "GPU DATA TIME (microseconds):\t" << (int) (milliseconds_data*1000) << std::endl;
checkCudaErrors(cudaFree(d_ptr));
checkCudaErrors(cudaFree(d_res));
}
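// Single-pass (non-separable) box blur: averages the full ker x ker window around each pixel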
__global__
void boxblur_ker(uchar* d_ptr, uchar* d_res, int ker, int oh, int ow)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
int c = blockIdx.z;
int h = oh - (ker - 1);
int w = ow - (ker - 1);
if (i >= h || j >= w) {
return;
}
int sum_r = 0;
for (int ki = i; ki < i+ker; ki++){
for (int kj = j; kj < j+ker; kj++){
uchar r = d_ptr[3*(ki*ow + kj) + c];
sum_r += r;
}
}
int ker_size = ker*ker;
d_res[3*(i*w+j) + c] = sum_r/ker_size;
}
void box_blur_cuda(PPMImage* img, PPMImage* res, int ker){
int h = res->x;
int w = res->y;
int oh = img->x;
int ow = img->y;
uchar* ptr = (uchar*) img->data;
uchar* dst = (uchar*) res->data;
uchar* d_ptr, *d_res;
checkCudaErrors(cudaMalloc(&d_ptr, oh*ow*3*sizeof(uchar)));
checkCudaErrors(cudaMalloc(&d_res, h*w*3*sizeof(uchar)));
cudaEvent_t start_all, stop_all, start_comp, stop_comp;
checkCudaErrors(cudaEventCreate(&start_all));
checkCudaErrors(cudaEventCreate(&stop_all));
checkCudaErrors(cudaEventCreate(&start_comp));
checkCudaErrors(cudaEventCreate(&stop_comp));
checkCudaErrors(cudaEventRecord(start_all));
checkCudaErrors(cudaMemcpy(d_ptr, ptr, oh*ow*3*sizeof(uchar), cudaMemcpyHostToDevice));
int cell_size = 32;
int num_blocks_x = h/cell_size + (h % cell_size != 0);
int num_blocks_y = w/cell_size + (w % cell_size != 0);
dim3 block_size = dim3(cell_size, cell_size);
dim3 grid_size = dim3(num_blocks_x, num_blocks_y, 3);
checkCudaErrors(cudaEventRecord(start_comp));
boxblur_ker<<<grid_size, block_size>>>(d_ptr, d_res, ker, oh, ow);
checkCudaErrors(cudaEventRecord(stop_comp));
checkCudaErrors(cudaMemcpy(dst, d_res, h*w*3*sizeof(uchar)*sizeof(uchar), cudaMemcpyDeviceToHost));
checkCudaErrors(cudaEventRecord(stop_all));
checkCudaErrors(cudaEventSynchronize(stop_all));
float milliseconds_all=0, milliseconds_comp=0, milliseconds_data=0;
checkCudaErrors(cudaEventElapsedTime(&milliseconds_all, start_all, stop_all));
checkCudaErrors(cudaEventElapsedTime(&milliseconds_comp, start_comp, stop_comp));
milliseconds_data = milliseconds_all - milliseconds_comp;
std::cout << "GPU KERNEL TIME (microseconds):\t" << (int) (milliseconds_comp*1000) << std::endl;
std::cout << "GPU TOTAL TIME (microseconds):\t" << (int) (milliseconds_all*1000) << std::endl;
std::cout << "GPU DATA TIME (microseconds):\t" << (int) (milliseconds_data*1000) << std::endl;
checkCudaErrors(cudaFree(d_ptr));
checkCudaErrors(cudaFree(d_res));
}
|
78a4a95945d6ef8ee9ab7e4010ff2b83dc98bda3.hip | // !!! This is a file automatically generated by hipify!!!
// To Compile: nvcc filtBackproj.cu -o filtBackproj.out -I/usr/local/cuda/include -L/usr/local/cuda/lib -lcufft
// To Run: ./filtBackproj.out numAngles theta.txt minR maxR numSensors sg.txt minX maxX numX minY maxY numY recon.txt
// ./filtBackproj.out 360 theta.txt -53.2 53.2 2129 sg.txt -44.25 44.25 886 -29.5 29.5 591 recon.txt
// Includes System
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// Includes CUDA
#include <hipfft.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#define pi 3.141592653589793238462643383279502884197169399375105820974f
#define MIN(a,b) (((a)<(b))?(a):(b))
#define MAX(a,b) (((a)>(b))?(a):(b))
// 1D float textures
texture<float, hipTextureType1DLayered, hipReadModeElementType> texSinogram;
// Backprojection kernel: samples the ramp-filtered sinogram with hardware 1D linear interpolation (comparable to MATLAB 1D interpolation)
__global__ void backprojKernel(int numAngles, float *d_theta,
float xmin, float dx, int numx,
float ymin, float dy, int numy,
float rmin, float dr, float *d_output)
{
unsigned int x_idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y_idx = blockIdx.y * blockDim.y + threadIdx.y;
if (x_idx < numx && y_idx < numy) {
float x, y, r, r_idx, integral = 0;
x = xmin + x_idx * dx;
y = ymin + y_idx * dy;
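// accumulate the filtered projections over all angles (discrete backprojection sum)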
for (int theta_idx = 0; theta_idx < numAngles; theta_idx++) {
r = x*cosf(d_theta[theta_idx] * pi / 180.0f) + y*sinf(d_theta[theta_idx] * pi / 180.0f);
r_idx = (r - rmin) / dr + 0.5f; // +0.5f: unnormalized texture coordinates sample texel centres
integral += tex1DLayered(texSinogram, r_idx, theta_idx);
}
d_output[x_idx + numx * y_idx] = integral;
}
}
// Ramp Filter Kernel
__global__ void rampFiltKernel(hipfftComplex* sgFFT, int numSensors, int numAngles)
{
// Calculate normalized texture coordinates
unsigned int ii = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int jj = blockIdx.y * blockDim.y + threadIdx.y;
if (ii < numSensors && jj < numAngles) {
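// ramp filter in the frequency domain: weight bin ii by |frequency| = MIN(ii, N-ii); the 1/numSensors factor also compensates for the unnormalized inverse FFT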
sgFFT[ii + numSensors*jj].x *= MIN(ii, numSensors - ii) / (float)numSensors;
sgFFT[ii + numSensors*jj].y *= MIN(ii, numSensors - ii) / (float)numSensors;
}
}
// Get float array for real component of hipfftComplex array
__global__ void getRealPart(float* dst, hipfftComplex* src, int numVals)
{
// Calculate normalized texture coordinates
unsigned int ii = blockIdx.x * blockDim.x + threadIdx.x;
if (ii < numVals) dst[ii] = src[ii].x;
}
// Host code
int main(int argc, char *argv[])
{
// Get all integer values from inputs
int numAngles = strtol(argv[1], NULL, 10);
float minR = atof(argv[3]);
float maxR = atof(argv[4]);
int numSensors = strtol(argv[5], NULL, 10);
float minX = atof(argv[7]);
float maxX = atof(argv[8]);
int numX = strtol(argv[9], NULL, 10);
float minY = atof(argv[10]);
float maxY = atof(argv[11]);
int numY = strtol(argv[12], NULL, 10);
// Calculate some other values based on those inputs
float dr = (maxR - minR) / ((float)(numSensors - 1));
float dx = (maxX - minX) / ((float)(numX - 1));
float dy = (maxY - minY) / ((float)(numY - 1));
// Read data from files to host arrays
hipfftComplex* sg;
sg = (hipfftComplex *)malloc(sizeof(hipfftComplex) * numAngles * numSensors);
float *h_theta, *sg_rf;
h_theta = (float*)malloc(numAngles*sizeof(float));
sg_rf = (float*)malloc(numAngles*numSensors*sizeof(float));
FILE *in_sg = fopen(argv[6], "r");
FILE *in_theta = fopen(argv[2], "r");
if (in_sg == NULL)
{
fprintf(stderr, "Input file for sinogram has some issues. Please check."); exit(1);
}
if (in_theta == NULL)
{
fprintf(stderr, "Input file for angle info has some issues. Please check."); exit(1);
}
float datfromfile;
for (int jj = 0; jj < numAngles; jj++) {
for (int ii = 0; ii < numSensors; ii++) {
fscanf(in_sg, "%f", &datfromfile);
sg[ii + numSensors*jj].x = datfromfile;
sg[ii + numSensors*jj].y = datfromfile;
}
}
for (int kk = 0; kk < numAngles; kk++) {
fscanf(in_theta, "%f", &datfromfile);
h_theta[kk] = datfromfile;
}
float *d_theta, *d_sg_rf;
hipMalloc(&d_theta, numAngles * sizeof(float));
hipMalloc(&d_sg_rf, numAngles * numSensors * sizeof(float));
hipMemcpy(d_theta, h_theta, numAngles * sizeof(float), hipMemcpyHostToDevice);
// Ramp filter the sinogram before binding it to a texture
// Setup device input data for FFT
hipfftComplex* dData;
hipMalloc((void **)&dData, sizeof(hipfftComplex) * numAngles * numSensors);
if (hipGetLastError() != hipSuccess)
{
fprintf(stderr, "Cuda error: Failed to allocate\n"); return -1;
}
// Copy Host Array to Device Array
hipMemcpy(dData, sg, sizeof(hipfftComplex)* numAngles * numSensors, hipMemcpyHostToDevice);
// Make FFT Plan
hipfftHandle plan;
if (hipfftPlan1d(&plan, numSensors, HIPFFT_C2C, numAngles) != HIPFFT_SUCCESS)
{
fprintf(stderr, "CUFFT error: Plan creation failed"); return -1;
}
// Execute FFT
if (hipfftExecC2C(plan, dData, dData, HIPFFT_FORWARD) != HIPFFT_SUCCESS)
{
fprintf(stderr, "CUFFT error: ExecC2C Forward failed"); return -1;
}
if (hipDeviceSynchronize() != hipSuccess)
{
fprintf(stderr, "Cuda error: Failed to synchronize\n"); return -1;
}
// Now Ramp Filter the FFT
dim3 dimBlockRF(16, 16, 1);
dim3 dimGridRF((numSensors + dimBlockRF.x - 1) / dimBlockRF.x,
(numAngles + dimBlockRF.y - 1) / dimBlockRF.y, 1);
rampFiltKernel << <dimGridRF, dimBlockRF >> >(dData, numSensors, numAngles);
// Do Inverse FFT
if (hipfftExecC2C(plan, dData, dData, HIPFFT_BACKWARD) != HIPFFT_SUCCESS)
{
fprintf(stderr, "CUFFT error: ExecC2C Forward failed"); return -1;
}
if (hipDeviceSynchronize() != hipSuccess)
{
fprintf(stderr, "Cuda error: Failed to synchronize\n"); return -1;
}
// Write the real part of output as the ramp filtered sinogram
int thdsPerBlk = 256;
int blksPerGrid = (numSensors*numAngles + thdsPerBlk - 1) / thdsPerBlk;
getRealPart << <blksPerGrid, thdsPerBlk >> >(d_sg_rf, dData, numSensors*numAngles);
hipMemcpy(sg_rf, d_sg_rf, sizeof(float)*numSensors*numAngles, hipMemcpyDeviceToHost);
// Set Up Texture for the Sinogram in Three Steps: 1), 2), and 3)
// 1) Allocate CUDA array in device memory
hipExtent extentDesc = make_hipExtent(numSensors, 0, numAngles); // <-- 0 height required for 1Dlayered
hipChannelFormatDesc channelDesc = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat);
hipMemcpy3DParms mParams = { 0 };
mParams.srcPtr = make_hipPitchedPtr(sg_rf, numSensors*sizeof(float), numSensors, 1);
mParams.kind = hipMemcpyHostToDevice;
mParams.extent = make_hipExtent(numSensors, 1, numAngles); // <<-- non zero height required for memcpy to do anything
hipArray* cuArray;
hipMalloc3DArray(&cuArray, &channelDesc, extentDesc, hipArrayLayered);
mParams.dstArray = cuArray;
hipMemcpy3D(&mParams);
// 2) Set texture reference parameters
texSinogram.addressMode[0] = hipAddressModeBorder;
texSinogram.filterMode = hipFilterModeLinear;
texSinogram.normalized = false;
// 3) Bind the array to the texture reference
hipBindTextureToArray(texSinogram, cuArray, channelDesc);
// Allocate result of backprojection in device memory
float *d_output;
hipMalloc(&d_output, numX * numY * sizeof(float));
float *h_output;
h_output = (float*)malloc(numX*numY*sizeof(float));
// Invoke kernel
dim3 dimBlockBP(16, 16, 1);
dim3 dimGridBP((numX + dimBlockBP.x - 1) / dimBlockBP.x,
(numY + dimBlockBP.y - 1) / dimBlockBP.y, 1);
backprojKernel << <dimGridBP, dimBlockBP >> >(numAngles, d_theta, minX, dx, numX, minY, dy, numY, minR, dr, d_output);
// Now write the output to a text file
hipMemcpy(h_output, d_output, numX * numY * sizeof(float), hipMemcpyDeviceToHost);
FILE *out = fopen(argv[13], "w");
if (out == NULL) { printf("Error opening file!\n"); exit(1); }
for (int y_idx = 0; y_idx < numY; y_idx++) {
for (int x_idx = 0; x_idx < numX; x_idx++) {
fprintf(out, "%f\n", h_output[x_idx + numX * y_idx]);
}
}
// Free device memory
hipFreeArray(cuArray);
hipFree(d_output);
return 0;
}
| 78a4a95945d6ef8ee9ab7e4010ff2b83dc98bda3.cu | // To Compile: nvcc filtBackproj.cu -o filtBackproj.out -I/usr/local/cuda/include -L/usr/local/cuda/lib -lcufft
// To Run: ./filtBackproj.out numAngles theta.txt minR maxR numSensors sg.txt minX maxX numX minY maxY numY recon.txt
// ./filtBackproj.out 360 theta.txt -53.2 53.2 2129 sg.txt -44.25 44.25 886 -29.5 29.5 591 recon.txt
// Includes System
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// Includes CUDA
#include <cufft.h>
#include <cuda_runtime.h>
#include <cuda_profiler_api.h>
#define pi 3.141592653589793238462643383279502884197169399375105820974f
#define MIN(a,b) (((a)<(b))?(a):(b))
#define MAX(a,b) (((a)>(b))?(a):(b))
// 1D float textures
texture<float, cudaTextureType1DLayered, cudaReadModeElementType> texSinogram;
// Backprojection kernel: samples the ramp-filtered sinogram with hardware 1D linear interpolation (comparable to MATLAB 1D interpolation)
__global__ void backprojKernel(int numAngles, float *d_theta,
float xmin, float dx, int numx,
float ymin, float dy, int numy,
float rmin, float dr, float *d_output)
{
unsigned int x_idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y_idx = blockIdx.y * blockDim.y + threadIdx.y;
if (x_idx < numx && y_idx < numy) {
float x, y, r, r_idx, integral = 0;
x = xmin + x_idx * dx;
y = ymin + y_idx * dy;
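// accumulate the filtered projections over all angles (discrete backprojection sum)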
for (int theta_idx = 0; theta_idx < numAngles; theta_idx++) {
r = x*cosf(d_theta[theta_idx] * pi / 180.0f) + y*sinf(d_theta[theta_idx] * pi / 180.0f);
r_idx = (r - rmin) / dr + 0.5f; // +0.5f: unnormalized texture coordinates sample texel centres
integral += tex1DLayered(texSinogram, r_idx, theta_idx);
}
d_output[x_idx + numx * y_idx] = integral;
}
}
// Ramp Filter Kernel
__global__ void rampFiltKernel(cufftComplex* sgFFT, int numSensors, int numAngles)
{
// Calculate normalized texture coordinates
unsigned int ii = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int jj = blockIdx.y * blockDim.y + threadIdx.y;
if (ii < numSensors && jj < numAngles) {
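// ramp filter in the frequency domain: weight bin ii by |frequency| = MIN(ii, N-ii); the 1/numSensors factor also compensates for the unnormalized inverse FFT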
sgFFT[ii + numSensors*jj].x *= MIN(ii, numSensors - ii) / (float)numSensors;
sgFFT[ii + numSensors*jj].y *= MIN(ii, numSensors - ii) / (float)numSensors;
}
}
// Get float array for real component of cufftComplex array
__global__ void getRealPart(float* dst, cufftComplex* src, int numVals)
{
// Calculate normalized texture coordinates
unsigned int ii = blockIdx.x * blockDim.x + threadIdx.x;
if (ii < numVals) dst[ii] = src[ii].x;
}
// Host code
int main(int argc, char *argv[])
{
// Get all integer values from inputs
int numAngles = strtol(argv[1], NULL, 10);
float minR = atof(argv[3]);
float maxR = atof(argv[4]);
int numSensors = strtol(argv[5], NULL, 10);
float minX = atof(argv[7]);
float maxX = atof(argv[8]);
int numX = strtol(argv[9], NULL, 10);
float minY = atof(argv[10]);
float maxY = atof(argv[11]);
int numY = strtol(argv[12], NULL, 10);
// Calculate some other values based on those inputs
float dr = (maxR - minR) / ((float)(numSensors - 1));
float dx = (maxX - minX) / ((float)(numX - 1));
float dy = (maxY - minY) / ((float)(numY - 1));
// Read data from files to host arrays
cufftComplex* sg;
sg = (cufftComplex *)malloc(sizeof(cufftComplex) * numAngles * numSensors);
float *h_theta, *sg_rf;
h_theta = (float*)malloc(numAngles*sizeof(float));
sg_rf = (float*)malloc(numAngles*numSensors*sizeof(float));
FILE *in_sg = fopen(argv[6], "r");
FILE *in_theta = fopen(argv[2], "r");
if (in_sg == NULL)
{
fprintf(stderr, "Input file for sinogram has some issues. Please check."); exit(1);
}
if (in_theta == NULL)
{
fprintf(stderr, "Input file for angle info has some issues. Please check."); exit(1);
}
float datfromfile;
for (int jj = 0; jj < numAngles; jj++) {
for (int ii = 0; ii < numSensors; ii++) {
fscanf(in_sg, "%f", &datfromfile);
sg[ii + numSensors*jj].x = datfromfile;
sg[ii + numSensors*jj].y = datfromfile;
}
}
for (int kk = 0; kk < numAngles; kk++) {
fscanf(in_theta, "%f", &datfromfile);
h_theta[kk] = datfromfile;
}
float *d_theta, *d_sg_rf;
cudaMalloc(&d_theta, numAngles * sizeof(float));
cudaMalloc(&d_sg_rf, numAngles * numSensors * sizeof(float));
cudaMemcpy(d_theta, h_theta, numAngles * sizeof(float), cudaMemcpyHostToDevice);
// Ramp filter the sinogram before binding it to a texture
// Setup device input data for FFT
cufftComplex* dData;
cudaMalloc((void **)&dData, sizeof(cufftComplex) * numAngles * numSensors);
if (cudaGetLastError() != cudaSuccess)
{
fprintf(stderr, "Cuda error: Failed to allocate\n"); return -1;
}
// Copy Host Array to Device Array
cudaMemcpy(dData, sg, sizeof(cufftComplex)* numAngles * numSensors, cudaMemcpyHostToDevice);
// Make FFT Plan
cufftHandle plan;
if (cufftPlan1d(&plan, numSensors, CUFFT_C2C, numAngles) != CUFFT_SUCCESS)
{
fprintf(stderr, "CUFFT error: Plan creation failed"); return -1;
}
// Execute FFT
if (cufftExecC2C(plan, dData, dData, CUFFT_FORWARD) != CUFFT_SUCCESS)
{
fprintf(stderr, "CUFFT error: ExecC2C Forward failed"); return -1;
}
if (cudaThreadSynchronize() != cudaSuccess)
{
fprintf(stderr, "Cuda error: Failed to synchronize\n"); return -1;
}
// Now Ramp Filter the FFT
dim3 dimBlockRF(16, 16, 1);
dim3 dimGridRF((numSensors + dimBlockRF.x - 1) / dimBlockRF.x,
(numAngles + dimBlockRF.y - 1) / dimBlockRF.y, 1);
rampFiltKernel << <dimGridRF, dimBlockRF >> >(dData, numSensors, numAngles);
// Do Inverse FFT
if (cufftExecC2C(plan, dData, dData, CUFFT_INVERSE) != CUFFT_SUCCESS)
{
fprintf(stderr, "CUFFT error: ExecC2C Forward failed"); return -1;
}
if (cudaThreadSynchronize() != cudaSuccess)
{
fprintf(stderr, "Cuda error: Failed to synchronize\n"); return -1;
}
// Write the real part of output as the ramp filtered sinogram
int thdsPerBlk = 256;
int blksPerGrid = (numSensors*numAngles + thdsPerBlk - 1) / thdsPerBlk;
getRealPart << <blksPerGrid, thdsPerBlk >> >(d_sg_rf, dData, numSensors*numAngles);
cudaMemcpy(sg_rf, d_sg_rf, sizeof(float)*numSensors*numAngles, cudaMemcpyDeviceToHost);
// Set Up Texture for the Sinogram in Three Steps: 1), 2), and 3)
// 1) Allocate CUDA array in device memory
cudaExtent extentDesc = make_cudaExtent(numSensors, 0, numAngles); // <-- 0 height required for 1Dlayered
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);
cudaMemcpy3DParms mParams = { 0 };
mParams.srcPtr = make_cudaPitchedPtr(sg_rf, numSensors*sizeof(float), numSensors, 1);
mParams.kind = cudaMemcpyHostToDevice;
mParams.extent = make_cudaExtent(numSensors, 1, numAngles); // <<-- non zero height required for memcpy to do anything
cudaArray* cuArray;
cudaMalloc3DArray(&cuArray, &channelDesc, extentDesc, cudaArrayLayered);
mParams.dstArray = cuArray;
cudaMemcpy3D(&mParams);
// 2) Set texture reference parameters
texSinogram.addressMode[0] = cudaAddressModeBorder;
texSinogram.filterMode = cudaFilterModeLinear;
texSinogram.normalized = false;
// 3) Bind the array to the texture reference
cudaBindTextureToArray(texSinogram, cuArray, channelDesc);
// Allocate result of backprojection in device memory
float *d_output;
cudaMalloc(&d_output, numX * numY * sizeof(float));
float *h_output;
h_output = (float*)malloc(numX*numY*sizeof(float));
// Invoke kernel
dim3 dimBlockBP(16, 16, 1);
dim3 dimGridBP((numX + dimBlockBP.x - 1) / dimBlockBP.x,
(numY + dimBlockBP.y - 1) / dimBlockBP.y, 1);
backprojKernel << <dimGridBP, dimBlockBP >> >(numAngles, d_theta, minX, dx, numX, minY, dy, numY, minR, dr, d_output);
// Now write the output to a text file
cudaMemcpy(h_output, d_output, numX * numY * sizeof(float), cudaMemcpyDeviceToHost);
FILE *out = fopen(argv[13], "w");
if (out == NULL) { printf("Error opening file!\n"); exit(1); }
for (int y_idx = 0; y_idx < numY; y_idx++) {
for (int x_idx = 0; x_idx < numX; x_idx++) {
fprintf(out, "%f\n", h_output[x_idx + numX * y_idx]);
}
}
// Free device memory
cudaFreeArray(cuArray);
cudaFree(d_output);
return 0;
}
|
0064e393e8732eaefd81594ebb225165ecaf6150.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Diffusion Simulation
* nvcc -arch=sm_30 DS.cu -run
* */
#include <stdio.h>
#include <stdlib.h>
#include <cstdlib>
#include <fstream>
#include <iostream>
#include <sys/time.h>
// set a 3D volume
//define the data set size (cubic volume)
#define DATAXSIZE 128
#define DATAYSIZE 128
#define DATAZSIZE 128
//block size = 8*8*8 = 512
#define BLKXSIZE 8
#define BLKYSIZE 8
#define BLKZSIZE 8
//time iteration
#define t 10000
//OpenGL version
#define DIM 128
//CPU Validation
#define CPUV 0
// device function to set the 3D volume
__global__ void diffusion(float (*output_array)[DATAYSIZE][DATAXSIZE],
float (*shadow_array)[DATAYSIZE][DATAXSIZE])
{
// // get grid, only works on GTX 1000 up
// grid_group g = this_grid();
// get position
unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int idy = blockIdx.y*blockDim.y + threadIdx.y;
unsigned int idz = blockIdx.z*blockDim.z + threadIdx.z;
// not the edge
if(idx>0 && idx<DATAXSIZE-1 && idy>0 && idy<DATAYSIZE-1 && idz>0 && idz<DATAZSIZE-1){
output_array[idz][idy][idx] = (shadow_array[idz][idy][idx-1] + shadow_array[idz][idy][idx+1]
+ shadow_array[idz][idy-1][idx] + shadow_array[idz][idy+1][idx]
+ shadow_array[idz-1][idy][idx] + shadow_array[idz+1][idy][idx])/6;
}
// reach to the edge to rebound
else{
int nbr = 6;
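// boundary cell: average only over in-grid neighbours (reflecting/rebounding boundary)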
if(idx==0 || idx==DATAXSIZE-1) nbr-=1;
if(idy==0 || idy==DATAYSIZE-1) nbr-=1;
if(idz==0 || idz==DATAZSIZE-1) nbr-=1;
output_array[idz][idy][idx] = (((idx==0)? 0:shadow_array[idz][idy][idx-1]) + ((idx==(DATAXSIZE-1))? 0: shadow_array[idz][idy][idx+1])
+ ((idy==0)? 0:shadow_array[idz][idy-1][idx]) + ((idy==(DATAYSIZE-1))? 0: shadow_array[idz][idy+1][idx])
+ ((idz==0)? 0:shadow_array[idz-1][idy][idx])+ ((idz==(DATAZSIZE-1))? 0: shadow_array[idz+1][idy][idx]))/nbr;
}
}
// refresh shadow array
__global__ void refesh(float (*output_array)[DATAYSIZE][DATAXSIZE],
float (*shadow_array)[DATAYSIZE][DATAXSIZE])
{
unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int idy = blockIdx.y*blockDim.y + threadIdx.y;
unsigned int idz = blockIdx.z*blockDim.z + threadIdx.z;
// update shadow and reset barrier
shadow_array[idz][idy][idx] = output_array[idz][idy][idx];
}
// cpu version for validation
void diffusion_cpu(float (*output_array)[DATAYSIZE][DATAXSIZE],
float (*shadow_array)[DATAYSIZE][DATAXSIZE])
{
for(int time=1; time<=t; time++){
for(int idz=0;idz<DATAZSIZE;idz++)
for(int idy=0;idy<DATAYSIZE;idy++)
for(int idx=0;idx<DATAXSIZE;idx++){
if(idx>0 && idx<DATAXSIZE-1 && idy>0 && idy<DATAYSIZE-1 && idz>0 && idz<DATAZSIZE-1){
output_array[idz][idy][idx] = (shadow_array[idz][idy][idx-1] + shadow_array[idz][idy][idx+1]
+ shadow_array[idz][idy-1][idx] + shadow_array[idz][idy+1][idx]
+ shadow_array[idz-1][idy][idx] + shadow_array[idz+1][idy][idx])/6;
}
// reach to the edge to rebound
else{
int nbr = 6;
if(idx==0 || idx==DATAXSIZE-1) nbr-=1;
if(idy==0 || idy==DATAYSIZE-1) nbr-=1;
if(idz==0 || idz==DATAZSIZE-1) nbr-=1;
output_array[idz][idy][idx] = (((idx==0)? 0:shadow_array[idz][idy][idx-1]) + ((idx==(DATAXSIZE-1))? 0: shadow_array[idz][idy][idx+1])
+ ((idy==0)? 0:shadow_array[idz][idy-1][idx]) + ((idy==(DATAYSIZE-1))? 0: shadow_array[idz][idy+1][idx])
+ ((idz==0)? 0:shadow_array[idz-1][idy][idx])+ ((idz==(DATAZSIZE-1))? 0: shadow_array[idz+1][idy][idx]))/nbr;
// printf("%d,%d,%d-%f \n", idz,idy,idx, output_array[idz][idy][idx]);
}
}
// update shadow and reset barrier/signal
for(int idz=0;idz<DATAZSIZE;idz++)
for(int idy=0;idy<DATAYSIZE;idy++)
for(int idx=0;idx<DATAXSIZE;idx++){
shadow_array[idz][idy][idx] = output_array[idz][idy][idx];
}
}
}
int main(int argc, char *argv[])
{
typedef float nRarray[DATAYSIZE][DATAXSIZE];
// overall data set sizes
const int nx = DATAXSIZE;
const int ny = DATAYSIZE;
const int nz = DATAZSIZE;
// error code
hipError_t result;
// initial position
unsigned int init_pos[6] = {0};
init_pos[0] = DATAZSIZE/2 - 2;// z min
init_pos[1] = DATAZSIZE/2 + 2;// z max
init_pos[2] = DATAYSIZE/2 - 2;// y min
init_pos[3] = DATAYSIZE/2 + 2;// y max
init_pos[4] = DATAXSIZE/2 - 2;// x min
init_pos[5] = DATAXSIZE/2 + 2;// x max
//initial concentration
const float con_begin = 20000.0;
// pointers for data set storage via malloc
nRarray *output_c; // storage for result stored on host
nRarray *output_d; // storage for result computed on device
nRarray *shadow_c; // shadow array for saving temp value on host
nRarray *shadow_d; // shadow array for saving temp value
nRarray *output_cpu; // for cpu version
nRarray *shadow_cpu; // for cpu version
// allocate storage for receiving output
if ((output_c = (nRarray *)malloc((nx*ny*nz)*sizeof(float))) == 0) {
fprintf(stderr,"malloc1 Fail \n"); return 1;
}
// allocate storage for shadow array
if ((shadow_c = (nRarray *)malloc((nx*ny*nz)*sizeof(float))) == 0) {
fprintf(stderr,"malloc1 Fail \n"); return 1;
}
if ((output_cpu = (nRarray *)malloc((nx*ny*nz)*sizeof(float))) == 0) {
fprintf(stderr,"malloc1 Fail \n"); return 1;
}
// allocate storage for shadow array
if ((shadow_cpu = (nRarray *)malloc((nx*ny*nz)*sizeof(float))) == 0) {
fprintf(stderr,"malloc1 Fail \n"); return 1;
}
// initial concentration
for(int k=init_pos[0];k<=init_pos[1];k++)
for(int j=init_pos[2]; j<=init_pos[3];j++)
for(int i=init_pos[4];i<=init_pos[5];i++ ){
output_c[k][j][i] = con_begin;
shadow_c[k][j][i] = con_begin;
output_cpu[k][j][i] = con_begin;
shadow_cpu[k][j][i] = con_begin;
}
// allocate GPU device buffers
result = hipMalloc((void **) &output_d, (nx*ny*nz)*sizeof(float));
if (result != hipSuccess) {
fprintf(stderr, ("Failed to allocate device buffer-output_d"));
exit(1);
}
result = hipMalloc((void **) &shadow_d, (nx*ny*nz)*sizeof(float));
if (result != hipSuccess) {
fprintf(stderr, ("Failed to allocate device buffer- shadow_d"));
exit(1);
}
// copy host to device
result = hipMemcpy(shadow_d, shadow_c, (nx*ny*nz)*sizeof(float), hipMemcpyHostToDevice);
if (result != hipSuccess) {
fprintf(stderr, "hipMemcpy host->dev (block) failed.");
exit(1);
}
result = hipMemcpy(output_d, output_c, (nx*ny*nz)*sizeof(float), hipMemcpyHostToDevice);
if (result != hipSuccess) {
fprintf(stderr, "hipMemcpy host->dev (block) failed.");
exit(1);
}
//timing
struct timeval start_cpu, finish_cpu,start_gpu, finish_gpu;
// timing start
gettimeofday (&start_gpu, NULL);
// compute result
const dim3 blockSize(BLKXSIZE, BLKYSIZE, BLKZSIZE);
const dim3 gridSize((DATAXSIZE/BLKXSIZE), (DATAYSIZE/BLKYSIZE), (DATAZSIZE/BLKZSIZE));
// loop with time t
for(int time=1; time<=t; time++){
hipLaunchKernelGGL(( diffusion), dim3(gridSize),dim3(blockSize), 0, 0, output_d,shadow_d);
hipLaunchKernelGGL(( refesh), dim3(gridSize),dim3(blockSize), 0, 0, output_d,shadow_d);
// hipDeviceSynchronize();
}
// copy output data back to host
result = hipMemcpy(output_c, output_d, ((nx*ny*nz)*sizeof(float)), hipMemcpyDeviceToHost);
if (result != hipSuccess) {
fprintf(stderr, ("hipMemcpy dev (block)->host failed."));
exit(1);
}
//CPU validation is ON/OFF
if(CPUV){
//timing end
gettimeofday (&finish_gpu, NULL);
double elapsed_gpu = (finish_gpu.tv_sec - start_gpu.tv_sec)*1000000 + finish_gpu.tv_usec - start_gpu.tv_usec;
// timing start
gettimeofday (&start_cpu, NULL);
//cpu version
diffusion_cpu(output_cpu,shadow_cpu);
//timing end
gettimeofday (&finish_cpu, NULL);
double elapsed_cpu = (finish_cpu.tv_sec - start_cpu.tv_sec)*1000000 + finish_cpu.tv_usec - start_cpu.tv_usec;
//check two version
for (unsigned i=0; i<nz; i++)
for (unsigned j=0; j<ny; j++)
for (unsigned k=0; k<nx; k++){
if(output_c[i][j][k]!=output_cpu[i][j][k])
{
printf("check error happen \n");
printf("position:[%d][%d][%d]. value:%f:%f \n",i,j,k ,output_c[i][j][k],output_cpu[i][j][k]);
// return 0;
}
}
printf("Time spent(GPU) of dt = %d: %f \n",t,elapsed_gpu);
printf("Time spent(CPU) of dt = %d: %f \n",t,elapsed_cpu);
}
//write result to file
std::ofstream myfile;
myfile.open ("DS-2.csv",std::ios_base::app);
// myfile.open ("DS.csv");
for (unsigned i=0; i<nz; i++)
for (unsigned j=0; j<ny; j++)
for (unsigned k=0; k<nx; k++){
if( output_c[i][j][k]!=0)
myfile << i << "," << j << "," << k << "," << output_c[i][j][k] << "," << std::to_string(t) << std::endl;
}
myfile.close();
// free memory
free(output_c);
free(shadow_c);
result = hipFree(shadow_d);
if (result != hipSuccess) {
fprintf(stderr, ("Failed to Free device buffer - shadow_d"));
exit(1);
}
result = hipFree(output_d);
if (result != hipSuccess) {
fprintf(stderr, ("Failed to Free device buffer - output_d"));
exit(1);
}
return 0;
}
| 0064e393e8732eaefd81594ebb225165ecaf6150.cu | /* Diffusion Simulation
* nvcc -arch=sm_30 DS.cu -run
* */
#include <stdio.h>
#include <stdlib.h>
#include <cstdlib>
#include <fstream>
#include <iostream>
#include <sys/time.h>
// set a 3D volume
//define the data set size (cubic volume)
#define DATAXSIZE 128
#define DATAYSIZE 128
#define DATAZSIZE 128
//block size = 8*8*8 = 512
#define BLKXSIZE 8
#define BLKYSIZE 8
#define BLKZSIZE 8
//time iteration
#define t 10000
//OpenGL version
#define DIM 128
//CPU Validation
#define CPUV 0
// device function to set the 3D volume
__global__ void diffusion(float (*output_array)[DATAYSIZE][DATAXSIZE],
float (*shadow_array)[DATAYSIZE][DATAXSIZE])
{
// // get grid, only works on GTX 1000 up
// grid_group g = this_grid();
// get position
unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int idy = blockIdx.y*blockDim.y + threadIdx.y;
unsigned int idz = blockIdx.z*blockDim.z + threadIdx.z;
// not the edge
if(idx>0 && idx<DATAXSIZE-1 && idy>0 && idy<DATAYSIZE-1 && idz>0 && idz<DATAZSIZE-1){
output_array[idz][idy][idx] = (shadow_array[idz][idy][idx-1] + shadow_array[idz][idy][idx+1]
+ shadow_array[idz][idy-1][idx] + shadow_array[idz][idy+1][idx]
+ shadow_array[idz-1][idy][idx] + shadow_array[idz+1][idy][idx])/6;
}
// reach to the edge to rebound
else{
int nbr = 6;
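// boundary cell: average only over in-grid neighbours (reflecting/rebounding boundary)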
if(idx==0 || idx==DATAXSIZE-1) nbr-=1;
if(idy==0 || idy==DATAYSIZE-1) nbr-=1;
if(idz==0 || idz==DATAZSIZE-1) nbr-=1;
output_array[idz][idy][idx] = (((idx==0)? 0:shadow_array[idz][idy][idx-1]) + ((idx==(DATAXSIZE-1))? 0: shadow_array[idz][idy][idx+1])
+ ((idy==0)? 0:shadow_array[idz][idy-1][idx]) + ((idy==(DATAYSIZE-1))? 0: shadow_array[idz][idy+1][idx])
+ ((idz==0)? 0:shadow_array[idz-1][idy][idx])+ ((idz==(DATAZSIZE-1))? 0: shadow_array[idz+1][idy][idx]))/nbr;
}
}
// refresh shadow array
__global__ void refesh(float (*output_array)[DATAYSIZE][DATAXSIZE],
float (*shadow_array)[DATAYSIZE][DATAXSIZE])
{
unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int idy = blockIdx.y*blockDim.y + threadIdx.y;
unsigned int idz = blockIdx.z*blockDim.z + threadIdx.z;
// update shadow and reset barrier
shadow_array[idz][idy][idx] = output_array[idz][idy][idx];
}
// cpu version for validation
void diffusion_cpu(float (*output_array)[DATAYSIZE][DATAXSIZE],
float (*shadow_array)[DATAYSIZE][DATAXSIZE])
{
for(int time=1; time<=t; time++){
for(int idz=0;idz<DATAZSIZE;idz++)
for(int idy=0;idy<DATAYSIZE;idy++)
for(int idx=0;idx<DATAXSIZE;idx++){
if(idx>0 && idx<DATAXSIZE-1 && idy>0 && idy<DATAYSIZE-1 && idz>0 && idz<DATAZSIZE-1){
output_array[idz][idy][idx] = (shadow_array[idz][idy][idx-1] + shadow_array[idz][idy][idx+1]
+ shadow_array[idz][idy-1][idx] + shadow_array[idz][idy+1][idx]
+ shadow_array[idz-1][idy][idx] + shadow_array[idz+1][idy][idx])/6;
}
// reach to the edge to rebound
else{
int nbr = 6;
if(idx==0 || idx==DATAXSIZE-1) nbr-=1;
if(idy==0 || idy==DATAYSIZE-1) nbr-=1;
if(idz==0 || idz==DATAZSIZE-1) nbr-=1;
output_array[idz][idy][idx] = (((idx==0)? 0:shadow_array[idz][idy][idx-1]) + ((idx==(DATAXSIZE-1))? 0: shadow_array[idz][idy][idx+1])
+ ((idy==0)? 0:shadow_array[idz][idy-1][idx]) + ((idy==(DATAYSIZE-1))? 0: shadow_array[idz][idy+1][idx])
+ ((idz==0)? 0:shadow_array[idz-1][idy][idx])+ ((idz==(DATAZSIZE-1))? 0: shadow_array[idz+1][idy][idx]))/nbr;
// printf("%d,%d,%d-%f \n", idz,idy,idx, output_array[idz][idy][idx]);
}
}
// update shadow and reset barrier/signal
for(int idz=0;idz<DATAZSIZE;idz++)
for(int idy=0;idy<DATAYSIZE;idy++)
for(int idx=0;idx<DATAXSIZE;idx++){
shadow_array[idz][idy][idx] = output_array[idz][idy][idx];
}
}
}
int main(int argc, char *argv[])
{
typedef float nRarray[DATAYSIZE][DATAXSIZE];
// overall data set sizes
const int nx = DATAXSIZE;
const int ny = DATAYSIZE;
const int nz = DATAZSIZE;
// error code
cudaError_t result;
// initial position
unsigned int init_pos[6] = {0};
init_pos[0] = DATAZSIZE/2 - 2;// z min
init_pos[1] = DATAZSIZE/2 + 2;// z max
init_pos[2] = DATAYSIZE/2 - 2;// y min
init_pos[3] = DATAYSIZE/2 + 2;// y max
init_pos[4] = DATAXSIZE/2 - 2;// x min
init_pos[5] = DATAXSIZE/2 + 2;// x max
//initial concentration
const float con_begin = 20000.0;
// pointers for data set storage via malloc
nRarray *output_c; // storage for result stored on host
nRarray *output_d; // storage for result computed on device
nRarray *shadow_c; // shadow array for saving temp value on host
nRarray *shadow_d; // shadow array for saving temp value
nRarray *output_cpu; // for cpu version
nRarray *shadow_cpu; // for cpu version
// allocate storage for receiving output
if ((output_c = (nRarray *)malloc((nx*ny*nz)*sizeof(float))) == 0) {
fprintf(stderr,"malloc1 Fail \n"); return 1;
}
// allocate storage for shadow array
if ((shadow_c = (nRarray *)malloc((nx*ny*nz)*sizeof(float))) == 0) {
fprintf(stderr,"malloc1 Fail \n"); return 1;
}
if ((output_cpu = (nRarray *)malloc((nx*ny*nz)*sizeof(float))) == 0) {
fprintf(stderr,"malloc1 Fail \n"); return 1;
}
// allocate storage for shadow array
if ((shadow_cpu = (nRarray *)malloc((nx*ny*nz)*sizeof(float))) == 0) {
fprintf(stderr,"malloc1 Fail \n"); return 1;
}
// initial concentration
for(int k=init_pos[0];k<=init_pos[1];k++)
for(int j=init_pos[2]; j<=init_pos[3];j++)
for(int i=init_pos[4];i<=init_pos[5];i++ ){
output_c[k][j][i] = con_begin;
shadow_c[k][j][i] = con_begin;
output_cpu[k][j][i] = con_begin;
shadow_cpu[k][j][i] = con_begin;
}
// allocate GPU device buffers
result = cudaMalloc((void **) &output_d, (nx*ny*nz)*sizeof(float));
if (result != cudaSuccess) {
fprintf(stderr, ("Failed to allocate device buffer-output_d"));
exit(1);
}
result = cudaMalloc((void **) &shadow_d, (nx*ny*nz)*sizeof(float));
if (result != cudaSuccess) {
fprintf(stderr, ("Failed to allocate device buffer- shadow_d"));
exit(1);
}
// copy host to device
result = cudaMemcpy(shadow_d, shadow_c, (nx*ny*nz)*sizeof(float), cudaMemcpyHostToDevice);
if (result != cudaSuccess) {
fprintf(stderr, "cudaMemcpy host->dev (block) failed.");
exit(1);
}
result = cudaMemcpy(output_d, output_c, (nx*ny*nz)*sizeof(float), cudaMemcpyHostToDevice);
if (result != cudaSuccess) {
fprintf(stderr, "cudaMemcpy host->dev (block) failed.");
exit(1);
}
//timing
struct timeval start_cpu, finish_cpu,start_gpu, finish_gpu;
// timing start
gettimeofday (&start_gpu, NULL);
// compute result
const dim3 blockSize(BLKXSIZE, BLKYSIZE, BLKZSIZE);
const dim3 gridSize((DATAXSIZE/BLKXSIZE), (DATAYSIZE/BLKYSIZE), (DATAZSIZE/BLKZSIZE));
// loop with time t
for(int time=1; time<=t; time++){
diffusion<<<gridSize,blockSize>>>(output_d,shadow_d);
refesh<<<gridSize,blockSize>>>(output_d,shadow_d);
// cudaDeviceSynchronize();
}
// copy output data back to host
result = cudaMemcpy(output_c, output_d, ((nx*ny*nz)*sizeof(float)), cudaMemcpyDeviceToHost);
if (result != cudaSuccess) {
fprintf(stderr, ("cudaMemcpy dev (block)->host failed."));
exit(1);
}
//CPU validation is ON/OFF
if(CPUV){
//timing end
gettimeofday (&finish_gpu, NULL);
double elapsed_gpu = (finish_gpu.tv_sec - start_gpu.tv_sec)*1000000 + finish_gpu.tv_usec - start_gpu.tv_usec;
// timing start
gettimeofday (&start_cpu, NULL);
//cpu version
diffusion_cpu(output_cpu,shadow_cpu);
//timing end
gettimeofday (&finish_cpu, NULL);
double elapsed_cpu = (finish_cpu.tv_sec - start_cpu.tv_sec)*1000000 + finish_cpu.tv_usec - start_cpu.tv_usec;
//check two version
for (unsigned i=0; i<nz; i++)
for (unsigned j=0; j<ny; j++)
for (unsigned k=0; k<nx; k++){
if(output_c[i][j][k]!=output_cpu[i][j][k])
{
printf("check error happen \n");
printf("position:[%d][%d][%d]. value:%f:%f \n",i,j,k ,output_c[i][j][k],output_cpu[i][j][k]);
// return 0;
}
}
printf("Time spent(GPU) of dt = %d: %f \n",t,elapsed_gpu);
printf("Time spent(CPU) of dt = %d: %f \n",t,elapsed_cpu);
}
//write result to file
std::ofstream myfile;
myfile.open ("DS-2.csv",std::ios_base::app);
// myfile.open ("DS.csv");
for (unsigned i=0; i<nz; i++)
for (unsigned j=0; j<ny; j++)
for (unsigned k=0; k<nx; k++){
if( output_c[i][j][k]!=0)
myfile << i << "," << j << "," << k << "," << output_c[i][j][k] << "," << std::to_string(t) << std::endl;
}
myfile.close();
// free memory
free(output_c);
free(shadow_c);
result = cudaFree(shadow_d);
if (result != cudaSuccess) {
fprintf(stderr, ("Failed to Free device buffer - shadow_d"));
exit(1);
}
result = cudaFree(output_d);
if (result != cudaSuccess) {
fprintf(stderr, ("Failed to Free device buffer - output_d"));
exit(1);
}
return 0;
}
|
9e63ca97fd614a7b807f5d64a00a7fe7f6bce7dc.hip | // !!! This is a file automatically generated by hipify!!!
/*
============================================================================
Name : cudaFunction.cu
Author : Miriam Assraf
Description : histogram calculation with CUDA
============================================================================
*/
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include <stdio.h>
#include "cudaFuncs.h"
#include "constants.h"
__global__ void histogramKernel(const int* A, int* results, int size, int num_blocks)
{
int block = blockIdx.x;
int thread = threadIdx.x;
int start = (block * size) / num_blocks;
int end = ((block + 1) * size) / num_blocks;
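// each block scans its own contiguous slice of A; thread t counts occurrences of value t in this block's private histogram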
for (int i = start; i < end; i++) {
if (thread < HISTO_SZ) {
if (A[i] == thread) {
results[block * HISTO_SZ + thread]++; // increment this block's bin for intensity value 'thread' (0-255)
}
}
}
}
__host__ void checkErrors(hipError_t err, const char *error_msg)
{
if (err != hipSuccess) {
fprintf(stderr, error_msg, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
int histogramWithCuda(const int* A, int* results, int size, int threadsPerBlock, int blocksPerGrid)
{
hipError_t err = hipSuccess; // Error code to check return values for CUDA calls
int* dev_A = 0;
int* dev_results = 0;
// Allocate memory on GPU to copy the data from the host
err = hipMalloc((void**)&dev_A, size * sizeof(int));
checkErrors(err, "Failed to allocate device memory - %s\n");
// Copy data from host to the GPU memory
err = hipMemcpy(dev_A, A, size * sizeof(int), hipMemcpyHostToDevice);
checkErrors(err, "Failed to copy data from host to device - %s\n");
// Allocate memory on GPU for results to send to host
err = hipMalloc((void**)&dev_results, blocksPerGrid * HISTO_SZ * sizeof(int)); // each block counts values 0-255 over its own slice
checkErrors(err, "Failed to allocate device memory - %s\n");
hipMemset(dev_results, 0, blocksPerGrid * HISTO_SZ * sizeof(int)); // initialize results array with zeros
// Launch the Kernel
histogramKernel << <blocksPerGrid, threadsPerBlock >> > (dev_A, dev_results, size, blocksPerGrid);
err = hipGetLastError();
checkErrors(err, "Failed to launch histogram kernel - %s\n");
// Copy the result from GPU to the host memory.
err = hipMemcpy(results, dev_results, blocksPerGrid * HISTO_SZ * sizeof(int), hipMemcpyDeviceToHost);
checkErrors(err, "Failed to copy result array from device to host -%s\n");
// Free allocated memory on GPU
err = hipFree(dev_A);
checkErrors(err, "Failed to free device data - %s\n");
err = hipFree(dev_results);
checkErrors(err, "Failed to free device results - %s\n");
return 0;
}
| 9e63ca97fd614a7b807f5d64a00a7fe7f6bce7dc.cu | /*
============================================================================
Name : cudaFunction.cu
Author : Miriam Assraf
Description : histogram calculation with CUDA
============================================================================
*/
#include <cuda_runtime.h>
#include <helper_cuda.h>
#include <stdio.h>
#include "cudaFuncs.h"
#include "constants.h"
__global__ void histogramKernel(const int* A, int* results, int size, int num_blocks)
{
int block = blockIdx.x;
int thread = threadIdx.x;
int start = (block * size) / num_blocks;
int end = ((block + 1) * size) / num_blocks;
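// each block scans its own contiguous slice of A; thread t counts occurrences of value t in this block's private histogram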
for (int i = start; i < end; i++) {
if (thread < HISTO_SZ) {
if (A[i] == thread) {
results[block * HISTO_SZ + thread]++; // increment this block's bin for intensity value 'thread' (0-255)
}
}
}
}
__host__ void checkErrors(cudaError_t err, const char *error_msg)
{
if (err != cudaSuccess) {
fprintf(stderr, error_msg, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
int histogramWithCuda(const int* A, int* results, int size, int threadsPerBlock, int blocksPerGrid)
{
cudaError_t err = cudaSuccess; // Error code to check return values for CUDA calls
int* dev_A = 0;
int* dev_results = 0;
// Allocate memory on GPU to copy the data from the host
err = cudaMalloc((void**)&dev_A, size * sizeof(int));
checkErrors(err, "Failed to allocate device memory - %s\n");
// Copy data from host to the GPU memory
err = cudaMemcpy(dev_A, A, size * sizeof(int), cudaMemcpyHostToDevice);
checkErrors(err, "Failed to copy data from host to device - %s\n");
// Allocate memory on GPU for results to send to host
err = cudaMalloc((void**)&dev_results, blocksPerGrid * HISTO_SZ * sizeof(int)); // each block counts values 0-255 over its own slice
checkErrors(err, "Failed to allocate device memory - %s\n");
cudaMemset(dev_results, 0, blocksPerGrid * HISTO_SZ * sizeof(int)); // initialize results array with zeros
// Launch the Kernel
histogramKernel << <blocksPerGrid, threadsPerBlock >> > (dev_A, dev_results, size, blocksPerGrid);
err = cudaGetLastError();
checkErrors(err, "Failed to launch histogram kernel - %s\n");
// Copy the result from GPU to the host memory.
err = cudaMemcpy(results, dev_results, blocksPerGrid * HISTO_SZ * sizeof(int), cudaMemcpyDeviceToHost);
checkErrors(err, "Failed to copy result array from device to host -%s\n");
// Free allocated memory on GPU
err = cudaFree(dev_A);
checkErrors(err, "Failed to free device data - %s\n");
err = cudaFree(dev_results);
checkErrors(err, "Failed to free device results - %s\n");
return 0;
}
|
0aae2e3c7ab05292214949b8999461687d7bb33f.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <chrono>
#include <hip/hip_runtime.h>
#define _QUEENS_BLOCK_SIZE_ 128
#define _EMPTY_ -1
typedef struct queen_root{
unsigned int control;
int8_t board[12];
} QueenRoot;
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
inline void prefixesHandleSol(QueenRoot *root_prefixes, unsigned int flag,
const char *board, int initialDepth, int num_sol)
{
root_prefixes[num_sol].control = flag;
for(int i = 0; i<initialDepth;++i)
root_prefixes[num_sol].board[i] = board[i];
}
inline bool MCstillLegal(const char *board, const int r)
{
// Check vertical
for (int i = 0; i < r; ++i)
if (board[i] == board[r]) return false;
// Check diagonals
int ld = board[r]; //left diagonal columns
int rd = board[r]; // right diagonal columns
for (int i = r-1; i >= 0; --i) {
--ld; ++rd;
if (board[i] == ld || board[i] == rd) return false;
}
return true;
}
__device__ bool queens_stillLegal(const char *board, const int r)
{
bool safe = true;
// Check vertical
for (int i = 0; i < r; ++i)
if (board[i] == board[r]) safe = false;
// Check diagonals
int ld = board[r]; //left diagonal columns
int rd = board[r]; // right diagonal columns
for (int i = r-1; i >= 0; --i) {
--ld; ++rd;
if (board[i] == ld || board[i] == rd) safe = false;
}
return safe;
}
__global__ void BP_queens_root_dfs(
int N, unsigned int nPreFixos, int depthPreFixos,
const QueenRoot *__restrict__ root_prefixes,
unsigned long long *__restrict__ vector_of_tree_size,
unsigned long long *__restrict__ sols)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < nPreFixos) {
unsigned int flag = 0;
unsigned int bit_test = 0;
char vertice[20];
int N_l = N;
int i, depth;
unsigned long long qtd_solutions_thread = 0ULL;
int depthGlobal = depthPreFixos;
unsigned long long tree_size = 0ULL;
#pragma unroll 2
for (i = 0; i < N_l; ++i) {
vertice[i] = _EMPTY_;
}
flag = root_prefixes[idx].control;
#pragma unroll 2
for (i = 0; i < depthGlobal; ++i)
vertice[i] = root_prefixes[idx].board[i];
depth = depthGlobal;
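// iterative backtracking (DFS) from this prefix: 'flag' is a bitmask of occupied columns, vertice[] the partial board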
do {
vertice[depth]++;
bit_test = 0;
bit_test |= (1<<vertice[depth]);
if(vertice[depth] == N_l){
vertice[depth] = _EMPTY_;
} else if (!(flag & bit_test ) && queens_stillLegal(vertice, depth)){
++tree_size;
flag |= (1ULL<<vertice[depth]);
depth++;
if (depth == N_l) { //sol
++qtd_solutions_thread;
} else continue;
} else continue;
depth--;
flag &= ~(1ULL<<vertice[depth]);
} while(depth >= depthGlobal);
sols[idx] = qtd_solutions_thread;
vector_of_tree_size[idx] = tree_size;
}//if
}//kernel
unsigned long long BP_queens_prefixes(int size, int initialDepth,
unsigned long long *tree_size,
QueenRoot *root_prefixes)
{
unsigned int flag = 0;
int bit_test = 0;
char vertice[20];
int i, nivel;
unsigned long long local_tree = 0ULL;
unsigned long long num_sol = 0;
for (i = 0; i < size; ++i) {
vertice[i] = -1;
}
nivel = 0;
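// host-side backtracking down to initialDepth; every feasible prefix board becomes one GPU search root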
do{
vertice[nivel]++;
bit_test = 0;
bit_test |= (1<<vertice[nivel]);
if(vertice[nivel] == size){
vertice[nivel] = _EMPTY_;
}else if ( MCstillLegal(vertice, nivel) && !(flag & bit_test ) ){ //is legal
flag |= (1ULL<<vertice[nivel]);
nivel++;
++local_tree;
if (nivel == initialDepth){ //handle solution
prefixesHandleSol(root_prefixes,flag,vertice,initialDepth,num_sol);
num_sol++;
}else continue;
}else continue;
nivel--;
flag &= ~(1ULL<<vertice[nivel]);
}while(nivel >= 0);
*tree_size = local_tree;
return num_sol;
}
void nqueens(short size, int initial_depth, unsigned int n_explorers, QueenRoot *root_prefixes_h ,
unsigned long long *vector_of_tree_size_h, unsigned long long *sols_h, const int repeat)
{
unsigned long long *vector_of_tree_size_d;
unsigned long long *sols_d;
QueenRoot *root_prefixes_d;
int num_blocks = ceil((double)n_explorers/_QUEENS_BLOCK_SIZE_);
hipMalloc((void**) &vector_of_tree_size_d, n_explorers*sizeof(unsigned long long));
hipMalloc((void**) &sols_d, n_explorers*sizeof(unsigned long long));
hipMalloc((void**) &root_prefixes_d, n_explorers*sizeof(QueenRoot));
hipMemcpy(root_prefixes_d, root_prefixes_h, n_explorers * sizeof(QueenRoot), hipMemcpyHostToDevice);
printf("\n### Regular BP-DFS search. ###\n");
auto start = std::chrono::steady_clock::now();
for (int i = 0; i < repeat; i++) {
hipLaunchKernelGGL(( BP_queens_root_dfs), dim3(num_blocks),dim3(_QUEENS_BLOCK_SIZE_), 0, 0,
size,
n_explorers,
initial_depth,
root_prefixes_d,
vector_of_tree_size_d,
sols_d);
}
hipDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
printf("Average kernel execution time: %f (s)\n", (time * 1e-9f) / repeat);
hipMemcpy(vector_of_tree_size_h, vector_of_tree_size_d, n_explorers*sizeof(unsigned long long), hipMemcpyDeviceToHost);
hipMemcpy(sols_h, sols_d, n_explorers*sizeof(unsigned long long), hipMemcpyDeviceToHost);
hipFree(vector_of_tree_size_d);
hipFree(sols_d);
hipFree(root_prefixes_d);
}
int main(int argc, char *argv[])
{
if (argc != 4) {
printf("Usage: %s <size> <initial depth> <repeat>\n", argv[0]);
return 1;
}
const short size = atoi(argv[1]); // 15 - 17 for a short run
const int initialDepth = atoi(argv[2]); // 6 or 7
const int repeat = atoi(argv[3]); // number of kernel executions (repetitions)
printf("\n### Initial depth: %d - Size: %d:", initialDepth, size);
unsigned long long tree_size = 0ULL;
unsigned long long qtd_sols_global = 0ULL;
unsigned int nMaxPrefixos = 75580635;
QueenRoot* root_prefixes_h = (QueenRoot*)malloc(sizeof(QueenRoot)*nMaxPrefixos);
unsigned long long *vector_of_tree_size_h = (unsigned long long*)malloc(sizeof(unsigned long long)*nMaxPrefixos);
unsigned long long *solutions_h = (unsigned long long*)malloc(sizeof(unsigned long long)*nMaxPrefixos);
if (root_prefixes_h == NULL || vector_of_tree_size_h == NULL || solutions_h == NULL) {
printf("Error: host out of memory\n");
if (root_prefixes_h) free(root_prefixes_h);
if (vector_of_tree_size_h) free(vector_of_tree_size_h);
if (solutions_h) free(solutions_h);
return 1;
}
//initial search, getting the tree root nodes for the gpu;
unsigned long long n_explorers = BP_queens_prefixes(size, initialDepth, &tree_size, root_prefixes_h);
//calling the gpu-based search
nqueens(size, initialDepth, n_explorers, root_prefixes_h, vector_of_tree_size_h, solutions_h, repeat);
printf("\nTree size: %llu", tree_size );
for(unsigned long long i = 0; i<n_explorers;++i){
if(solutions_h[i]>0)
qtd_sols_global += solutions_h[i];
if(vector_of_tree_size_h[i]>0)
tree_size +=vector_of_tree_size_h[i];
}
printf("\nNumber of solutions found: %llu \nTree size: %llu\n", qtd_sols_global, tree_size );
// Initial depth: 7 - Size: 15:
// Tree size: 2466109
// Number of solutions found: 2279184
// Tree size: 171129071
if (size == 15 && initialDepth == 7) {
if (qtd_sols_global == 2279184 && tree_size == 171129071)
printf("PASS\n");
else
printf("FAIL\n");
}
free(root_prefixes_h);
free(vector_of_tree_size_h);
free(solutions_h);
return 0;
}
| 0aae2e3c7ab05292214949b8999461687d7bb33f.cu | #include <stdio.h>
#include <stdlib.h>
#include <chrono>
#include <cuda.h>
#define _QUEENS_BLOCK_SIZE_ 128
#define _EMPTY_ -1
typedef struct queen_root{
unsigned int control;
int8_t board[12];
} QueenRoot;
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
inline void prefixesHandleSol(QueenRoot *root_prefixes, unsigned int flag,
const char *board, int initialDepth, int num_sol)
{
root_prefixes[num_sol].control = flag;
for(int i = 0; i<initialDepth;++i)
root_prefixes[num_sol].board[i] = board[i];
}
inline bool MCstillLegal(const char *board, const int r)
{
// Check vertical
for (int i = 0; i < r; ++i)
if (board[i] == board[r]) return false;
// Check diagonals
int ld = board[r]; //left diagonal columns
int rd = board[r]; // right diagonal columns
for (int i = r-1; i >= 0; --i) {
--ld; ++rd;
if (board[i] == ld || board[i] == rd) return false;
}
return true;
}
__device__ bool queens_stillLegal(const char *board, const int r)
{
bool safe = true;
// Check vertical
for (int i = 0; i < r; ++i)
if (board[i] == board[r]) safe = false;
// Check diagonals
int ld = board[r]; //left diagonal columns
int rd = board[r]; // right diagonal columns
for (int i = r-1; i >= 0; --i) {
--ld; ++rd;
if (board[i] == ld || board[i] == rd) safe = false;
}
return safe;
}
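// Each thread resumes the backtracking search from one precomputed prefix
// (root_prefixes[idx]); it counts the subtree nodes it visits (tree_size)
// and the complete boards it reaches (qtd_solutions_thread).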
__global__ void BP_queens_root_dfs(
int N, unsigned int nPreFixos, int depthPreFixos,
const QueenRoot *__restrict__ root_prefixes,
unsigned long long *__restrict__ vector_of_tree_size,
unsigned long long *__restrict__ sols)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < nPreFixos) {
unsigned int flag = 0;
unsigned int bit_test = 0;
char vertice[20];
int N_l = N;
int i, depth;
unsigned long long qtd_solutions_thread = 0ULL;
int depthGlobal = depthPreFixos;
unsigned long long tree_size = 0ULL;
#pragma unroll 2
for (i = 0; i < N_l; ++i) {
vertice[i] = _EMPTY_;
}
flag = root_prefixes[idx].control;
#pragma unroll 2
for (i = 0; i < depthGlobal; ++i)
vertice[i] = root_prefixes[idx].board[i];
depth = depthGlobal;
do {
vertice[depth]++;
bit_test = 0;
bit_test |= (1<<vertice[depth]);
if(vertice[depth] == N_l){
vertice[depth] = _EMPTY_;
} else if (!(flag & bit_test ) && queens_stillLegal(vertice, depth)){
++tree_size;
flag |= (1ULL<<vertice[depth]);
depth++;
if (depth == N_l) { //sol
++qtd_solutions_thread;
} else continue;
} else continue;
depth--;
flag &= ~(1ULL<<vertice[depth]);
} while(depth >= depthGlobal);
sols[idx] = qtd_solutions_thread;
vector_of_tree_size[idx] = tree_size;
}//if
}//kernel
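// Host-side DFS that enumerates every feasible placement of the first
// initialDepth queens; each partial board is stored as a root prefix to be
// expanded by one GPU thread.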
unsigned long long BP_queens_prefixes(int size, int initialDepth,
unsigned long long *tree_size,
QueenRoot *root_prefixes)
{
unsigned int flag = 0;
int bit_test = 0;
char vertice[20];
int i, nivel;
unsigned long long local_tree = 0ULL;
unsigned long long num_sol = 0;
for (i = 0; i < size; ++i) {
vertice[i] = -1;
}
nivel = 0;
do{
vertice[nivel]++;
bit_test = 0;
bit_test |= (1<<vertice[nivel]);
if(vertice[nivel] == size){
vertice[nivel] = _EMPTY_;
}else if ( MCstillLegal(vertice, nivel) && !(flag & bit_test ) ){ //is legal
flag |= (1ULL<<vertice[nivel]);
nivel++;
++local_tree;
if (nivel == initialDepth){ //handle solution
prefixesHandleSol(root_prefixes,flag,vertice,initialDepth,num_sol);
num_sol++;
}else continue;
}else continue;
nivel--;
flag &= ~(1ULL<<vertice[nivel]);
}while(nivel >= 0);
*tree_size = local_tree;
return num_sol;
}
void nqueens(short size, int initial_depth, unsigned int n_explorers, QueenRoot *root_prefixes_h ,
unsigned long long *vector_of_tree_size_h, unsigned long long *sols_h, const int repeat)
{
unsigned long long *vector_of_tree_size_d;
unsigned long long *sols_d;
QueenRoot *root_prefixes_d;
int num_blocks = ceil((double)n_explorers/_QUEENS_BLOCK_SIZE_);
cudaMalloc((void**) &vector_of_tree_size_d, n_explorers*sizeof(unsigned long long));
cudaMalloc((void**) &sols_d, n_explorers*sizeof(unsigned long long));
cudaMalloc((void**) &root_prefixes_d, n_explorers*sizeof(QueenRoot));
cudaMemcpy(root_prefixes_d, root_prefixes_h, n_explorers * sizeof(QueenRoot), cudaMemcpyHostToDevice);
printf("\n### Regular BP-DFS search. ###\n");
auto start = std::chrono::steady_clock::now();
for (int i = 0; i < repeat; i++) {
BP_queens_root_dfs<<< num_blocks,_QUEENS_BLOCK_SIZE_>>> (
size,
n_explorers,
initial_depth,
root_prefixes_d,
vector_of_tree_size_d,
sols_d);
}
cudaDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
printf("Average kernel execution time: %f (s)\n", (time * 1e-9f) / repeat);
cudaMemcpy(vector_of_tree_size_h, vector_of_tree_size_d, n_explorers*sizeof(unsigned long long), cudaMemcpyDeviceToHost);
cudaMemcpy(sols_h, sols_d, n_explorers*sizeof(unsigned long long), cudaMemcpyDeviceToHost);
cudaFree(vector_of_tree_size_d);
cudaFree(sols_d);
cudaFree(root_prefixes_d);
}
int main(int argc, char *argv[])
{
if (argc != 4) {
printf("Usage: %s <size> <initial depth> <repeat>\n", argv[0]);
return 1;
}
const short size = atoi(argv[1]); // 15 - 17 for a short run
const int initialDepth = atoi(argv[2]); // 6 or 7
const int repeat = atoi(argv[3]); // number of kernel executions (repetitions)
printf("\n### Initial depth: %d - Size: %d:", initialDepth, size);
unsigned long long tree_size = 0ULL;
unsigned long long qtd_sols_global = 0ULL;
unsigned int nMaxPrefixos = 75580635;
QueenRoot* root_prefixes_h = (QueenRoot*)malloc(sizeof(QueenRoot)*nMaxPrefixos);
unsigned long long *vector_of_tree_size_h = (unsigned long long*)malloc(sizeof(unsigned long long)*nMaxPrefixos);
unsigned long long *solutions_h = (unsigned long long*)malloc(sizeof(unsigned long long)*nMaxPrefixos);
if (root_prefixes_h == NULL || vector_of_tree_size_h == NULL || solutions_h == NULL) {
printf("Error: host out of memory\n");
if (root_prefixes_h) free(root_prefixes_h);
if (vector_of_tree_size_h) free(vector_of_tree_size_h);
if (solutions_h) free(solutions_h);
return 1;
}
//initial search, getting the tree root nodes for the gpu;
unsigned long long n_explorers = BP_queens_prefixes(size, initialDepth, &tree_size, root_prefixes_h);
//calling the gpu-based search
nqueens(size, initialDepth, n_explorers, root_prefixes_h, vector_of_tree_size_h, solutions_h, repeat);
printf("\nTree size: %llu", tree_size );
for(unsigned long long i = 0; i<n_explorers;++i){
if(solutions_h[i]>0)
qtd_sols_global += solutions_h[i];
if(vector_of_tree_size_h[i]>0)
tree_size +=vector_of_tree_size_h[i];
}
printf("\nNumber of solutions found: %llu \nTree size: %llu\n", qtd_sols_global, tree_size );
// Initial depth: 7 - Size: 15:
// Tree size: 2466109
// Number of solutions found: 2279184
// Tree size: 171129071
if (size == 15 && initialDepth == 7) {
if (qtd_sols_global == 2279184 && tree_size == 171129071)
printf("PASS\n");
else
printf("FAIL\n");
}
free(root_prefixes_h);
free(vector_of_tree_size_h);
free(solutions_h);
return 0;
}
|
e663d70de2fb352d7d6e91efde6a9cfac7122d8e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/clip_op.h"
namespace caffe2 {
namespace {
template <typename T>
__device__ T cuda_min(T x, T y);
template <typename T>
__device__ T cuda_max(T x, T y);
template <>
__device__ float cuda_min(float x, float y) { return fminf(x, y); }
template <>
__device__ float cuda_max(float x, float y) { return fmaxf(x, y); }
// Disabled since we don't use it right now.
/*
template <>
__device__ double cuda_min(double x, double y) { return fmin(x, y); }
template <>
__device__ double cuda_max(double x, double y) { return fmax(x, y); }
*/
template <typename T>
__global__ void ClipKernel(const int N, const T minval, const T maxval,
const T* X, T* Y) {
CUDA_1D_KERNEL_LOOP(i, N) {
Y[i] = cuda_min<T>(cuda_max<T>(X[i], minval), maxval);
}
}
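// Gradient of clip: dY passes through only where Y lies strictly inside
// (minval, maxval); clamped elements get zero gradient.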
template <typename T>
__global__ void ClipGradientKernel(const int N, const T minval,
const T maxval, const T* Y,
const T* dY, T* dX) {
CUDA_1D_KERNEL_LOOP(i, N) {
dX[i] = dY[i] * (Y[i] > minval && Y[i] < maxval);
}
}
} // namespace
template <>
bool ClipOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
CAFFE_ENFORCE_GE(X.numel(), 0);
auto* Y = Output(0, X.sizes(), at::dtype<float>());
hipLaunchKernelGGL(( ClipKernel),
dim3(CAFFE_GET_BLOCKS(X.numel())),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
X.numel(), min_, max_, X.data<float>(), Y->template mutable_data<float>());
return true;
}
template <>
bool ClipGradientOp<float, CUDAContext>::RunOnDevice() {
auto& Y = Input(0);
auto& dY = Input(1);
CAFFE_ENFORCE_GE(Y.numel(), 0);
CAFFE_ENFORCE_EQ(dY.numel(), Y.numel());
auto* dX = Output(0, Y.sizes(), at::dtype<float>());
hipLaunchKernelGGL(( ClipGradientKernel),
dim3(CAFFE_GET_BLOCKS(Y.numel())),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
Y.numel(),
min_,
max_,
Y.data<float>(),
dY.data<float>(),
dX->template mutable_data<float>());
return true;
}
REGISTER_CUDA_OPERATOR(Clip, ClipOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(ClipGradient, ClipGradientOp<float, CUDAContext>);
} // namespace caffe2
| e663d70de2fb352d7d6e91efde6a9cfac7122d8e.cu | #include "caffe2/core/context_gpu.h"
#include "caffe2/operators/clip_op.h"
namespace caffe2 {
namespace {
template <typename T>
__device__ T cuda_min(T x, T y);
template <typename T>
__device__ T cuda_max(T x, T y);
template <>
__device__ float cuda_min(float x, float y) { return fminf(x, y); }
template <>
__device__ float cuda_max(float x, float y) { return fmaxf(x, y); }
// Disabled since we don't use it right now.
/*
template <>
__device__ double cuda_min(double x, double y) { return fmin(x, y); }
template <>
__device__ double cuda_max(double x, double y) { return fmax(x, y); }
*/
template <typename T>
__global__ void ClipKernel(const int N, const T minval, const T maxval,
const T* X, T* Y) {
CUDA_1D_KERNEL_LOOP(i, N) {
Y[i] = cuda_min<T>(cuda_max<T>(X[i], minval), maxval);
}
}
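// Gradient of clip: dY passes through only where Y lies strictly inside
// (minval, maxval); clamped elements get zero gradient.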
template <typename T>
__global__ void ClipGradientKernel(const int N, const T minval,
const T maxval, const T* Y,
const T* dY, T* dX) {
CUDA_1D_KERNEL_LOOP(i, N) {
dX[i] = dY[i] * (Y[i] > minval && Y[i] < maxval);
}
}
} // namespace
template <>
bool ClipOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
CAFFE_ENFORCE_GE(X.numel(), 0);
auto* Y = Output(0, X.sizes(), at::dtype<float>());
ClipKernel<<<
CAFFE_GET_BLOCKS(X.numel()),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
X.numel(), min_, max_, X.data<float>(), Y->template mutable_data<float>());
return true;
}
template <>
bool ClipGradientOp<float, CUDAContext>::RunOnDevice() {
auto& Y = Input(0);
auto& dY = Input(1);
CAFFE_ENFORCE_GE(Y.numel(), 0);
CAFFE_ENFORCE_EQ(dY.numel(), Y.numel());
auto* dX = Output(0, Y.sizes(), at::dtype<float>());
ClipGradientKernel<<<
CAFFE_GET_BLOCKS(Y.numel()),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
Y.numel(),
min_,
max_,
Y.data<float>(),
dY.data<float>(),
dX->template mutable_data<float>());
return true;
}
REGISTER_CUDA_OPERATOR(Clip, ClipOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(ClipGradient, ClipGradientOp<float, CUDAContext>);
} // namespace caffe2
|
13d6f15979f592dd41c542599243719cffb84932.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/inference/tensorrt/plugin/generic_plugin.h"
#include "paddle/fluid/framework/framework.pb.h"
#include "paddle/fluid/framework/op_kernel_type.h"
#include "paddle/fluid/framework/phi_utils.h"
#include "paddle/fluid/inference/tensorrt/dynamic_shape_infermeta_registry.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/common/data_type.h"
#include "paddle/phi/core/compat/op_utils.h"
#include "paddle/phi/core/kernel_context.h"
#include "paddle/phi/core/kernel_factory.h"
namespace paddle {
namespace inference {
namespace tensorrt {
namespace plugin {
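// Translates the op's proto attributes (read from op_desc) into phi kernel
// attributes, following the order and types declared by the kernel signature.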
void BuildPhiKernelContextAttr(const framework::OpDesc& op_desc,
phi::KernelContext* kernel_context,
const phi::KernelSignature& signature,
const phi::Kernel* phi_kernel) {
if (!phi_kernel->IsValid()) {
return;
}
const phi::KernelArgsDef& args_def = phi_kernel->args_def();
const auto& attr_names = signature.attr_names;
const auto& attr_defs = args_def.attribute_defs();
PADDLE_ENFORCE_EQ(
attr_names.size(),
attr_defs.size(),
platform::errors::InvalidArgument(
"The attr_names.size() should be equal to attr_defs.size()."));
framework::AttrReader attr_reader(op_desc.GetAttrMap());
for (size_t k = 0; k < attr_names.size(); ++k) {
auto attr_name = attr_names[k];
auto* attr_ptr = attr_reader.GetAttr(attr_name);
if (attr_ptr) {
switch (attr_defs[k].type_index) {
case phi::AttributeType::SCALAR: {
auto& attr = *attr_ptr;
switch (AttrTypeID(attr)) {
case framework::proto::AttrType::FLOAT:
kernel_context->EmplaceBackAttr(
phi::Scalar(PADDLE_GET_CONST(float, attr)));
break;
case framework::proto::AttrType::FLOAT64:
kernel_context->EmplaceBackAttr(
phi::Scalar(PADDLE_GET_CONST(double, attr)));
break;
case framework::proto::AttrType::INT:
kernel_context->EmplaceBackAttr(
phi::Scalar(PADDLE_GET_CONST(int, attr)));
break;
case framework::proto::AttrType::LONG:
kernel_context->EmplaceBackAttr(
phi::Scalar(PADDLE_GET_CONST(int64_t, attr)));
break;
case framework::proto::AttrType::STRING:
kernel_context->EmplaceBackAttr(
phi::Scalar(PADDLE_GET_CONST(std::string, attr)));
break;
case framework::proto::AttrType::SCALAR:
kernel_context->EmplaceBackAttr(phi::Scalar(
PADDLE_GET_CONST(paddle::experimental::Scalar, attr)));
break;
default:
PADDLE_THROW(platform::errors::Unimplemented(
"Unsupported cast op attribute `%s` to Scalar when "
"ProtoAttr2PhiAttr.",
attr_name));
}
} break;
case phi::AttributeType::INT_ARRAY: {
auto& attr = *attr_ptr;
switch (AttrTypeID(attr)) {
case framework::proto::AttrType::INTS:
kernel_context->EmplaceBackAttr(std::move(
phi::IntArray(PADDLE_GET_CONST(std::vector<int32_t>, attr))));
break;
case framework::proto::AttrType::LONGS:
kernel_context->EmplaceBackAttr(std::move(
phi::IntArray(PADDLE_GET_CONST(std::vector<int64_t>, attr))));
break;
case framework::proto::AttrType::INT:
kernel_context->EmplaceBackAttr(
phi::IntArray({PADDLE_GET_CONST(int, attr)}));
break;
default:
PADDLE_THROW(platform::errors::Unimplemented(
"Unsupported cast op attribute `%s` to IntArray when "
"ProtoAttr2PhiAttr.",
attr_name));
}
} break;
case phi::AttributeType::SCALARS: {
auto& attr = *attr_ptr;
switch (AttrTypeID(attr)) {
case framework::proto::AttrType::INTS: {
const auto& vec = PADDLE_GET_CONST(std::vector<int32_t>, attr);
std::vector<phi::Scalar> scalar_list;
scalar_list.reserve(vec.size());
for (const auto& val : vec) {
scalar_list.emplace_back(val);
}
kernel_context->EmplaceBackAttr(std::move(scalar_list));
} break;
case framework::proto::AttrType::LONGS: {
const auto& vec = PADDLE_GET_CONST(std::vector<int64_t>, attr);
std::vector<phi::Scalar> scalar_list;
scalar_list.reserve(vec.size());
for (const auto& val : vec) {
scalar_list.emplace_back(val);
}
kernel_context->EmplaceBackAttr(std::move(scalar_list));
} break;
case framework::proto::AttrType::FLOATS: {
const auto& vec = PADDLE_GET_CONST(std::vector<float>, attr);
std::vector<phi::Scalar> scalar_list;
scalar_list.reserve(vec.size());
for (const auto& val : vec) {
scalar_list.emplace_back(val);
}
kernel_context->EmplaceBackAttr(std::move(scalar_list));
} break;
case framework::proto::AttrType::FLOAT64S: {
const auto& vec = PADDLE_GET_CONST(std::vector<double>, attr);
std::vector<phi::Scalar> scalar_list;
scalar_list.reserve(vec.size());
for (const auto& val : vec) {
scalar_list.emplace_back(val);
}
kernel_context->EmplaceBackAttr(std::move(scalar_list));
} break;
case framework::proto::AttrType::SCALARS: {
const auto& vec = PADDLE_GET_CONST(
std::vector<paddle::experimental::Scalar>, attr);
std::vector<phi::Scalar> scalar_list;
scalar_list.reserve(vec.size());
for (const auto& val : vec) {
scalar_list.emplace_back(val);
}
kernel_context->EmplaceBackAttr(std::move(scalar_list));
} break;
default:
PADDLE_THROW(platform::errors::Unimplemented(
"Unsupported cast op attribute `%s` to vector<Scalar> when "
"ProtoAttr2PhiAttr.",
attr_name));
}
} break;
default: {
auto& attr = *attr_ptr;
switch (attr_defs[k].type_index) {
case phi::AttributeType::FLOAT32:
kernel_context->EmplaceBackAttr(PADDLE_GET_CONST(float, attr));
break;
case phi::AttributeType::INT32:
kernel_context->EmplaceBackAttr(PADDLE_GET_CONST(int, attr));
break;
case phi::AttributeType::BOOL:
kernel_context->EmplaceBackAttr(PADDLE_GET_CONST(bool, attr));
break;
case phi::AttributeType::INT64:
kernel_context->EmplaceBackAttr(PADDLE_GET_CONST(int64_t, attr));
break;
case phi::AttributeType::INT32S:
kernel_context->EmplaceBackAttr(
PADDLE_GET_CONST(std::vector<int>, attr));
break;
case phi::AttributeType::DATA_TYPE: {
auto data_type = paddle::framework::TransToPhiDataType(
static_cast<framework::proto::VarType::Type>(
PADDLE_GET_CONST(int, attr)));
kernel_context->EmplaceBackAttr(data_type);
} break;
case phi::AttributeType::STRING:
kernel_context->EmplaceBackAttr(
PADDLE_GET_CONST(std::string, attr));
break;
case phi::AttributeType::INT64S:
switch (AttrTypeID(attr)) {
case framework::proto::AttrType::LONGS:
kernel_context->EmplaceBackAttr(
PADDLE_GET_CONST(std::vector<int64_t>, attr));
break;
case framework::proto::AttrType::INTS: {
const auto& vector_int_attr =
PADDLE_GET_CONST(std::vector<int>, attr);
const std::vector<int64_t> vector_int64_attr(
vector_int_attr.begin(), vector_int_attr.end());
kernel_context->EmplaceBackAttr(vector_int64_attr);
} break;
default:
PADDLE_THROW(platform::errors::Unimplemented(
"Unsupported cast op attribute `%s` to vector<int64_t> "
"when ProtoAttr2PhiAttr.",
attr_name));
}
break;
case phi::AttributeType::FLOAT32S:
kernel_context->EmplaceBackAttr(
PADDLE_GET_CONST(std::vector<float>, attr));
break;
case phi::AttributeType::STRINGS:
kernel_context->EmplaceBackAttr(
PADDLE_GET_CONST(std::vector<std::string>, attr));
break;
case phi::AttributeType::BOOLS:
kernel_context->EmplaceBackAttr(
PADDLE_GET_CONST(std::vector<bool>, attr));
break;
case phi::AttributeType::FLOAT64S:
kernel_context->EmplaceBackAttr(
PADDLE_GET_CONST(std::vector<double>, attr));
break;
default:
PADDLE_THROW(platform::errors::Unimplemented(
"Unsupported cast op attribute `%s` when construct "
"ProtoAttr2PhiAttr.",
attr_name));
}
}
}
}
}
CHECK_EQ(attr_names.size(), kernel_context->AttrsSize());
}
GenericPlugin::GenericPlugin(
const paddle::framework::proto::OpDesc& proto_op_desc,
const InputOutPutVarInfo& in_out_info,
bool with_fp16) {
proto_op_desc_ = proto_op_desc;
op_desc_ = std::move(framework::OpDesc(proto_op_desc_, nullptr));
proto_op_desc_.SerializeToString(&op_meta_data_);
inputs_data_type_ = in_out_info.inputs_data_type;
outputs_data_type_ = in_out_info.outputs_data_type;
with_fp16_ = with_fp16;
}
GenericPlugin::GenericPlugin(
const paddle::framework::proto::OpDesc& proto_op_desc,
const std::vector<int>& inputs_data_type,
const std::vector<int>& outputs_data_type,
bool with_fp16) {
proto_op_desc_ = proto_op_desc;
op_desc_ = std::move(framework::OpDesc(proto_op_desc_, nullptr));
proto_op_desc_.SerializeToString(&op_meta_data_);
inputs_data_type_ = inputs_data_type;
outputs_data_type_ = outputs_data_type;
with_fp16_ = with_fp16;
}
GenericPlugin::GenericPlugin(void const* serial_data, size_t serial_length) {
DeserializeValue(&serial_data, &serial_length, &inputs_data_type_);
DeserializeValue(&serial_data, &serial_length, &outputs_data_type_);
DeserializeValue(&serial_data, &serial_length, &with_fp16_);
std::string op_meta_data((char*)(serial_data), serial_length); // NOLINT
op_meta_data_ = std::move(op_meta_data);
proto_op_desc_.ParseFromString(op_meta_data_);
op_desc_ = std::move(framework::OpDesc(proto_op_desc_, nullptr));
}
int GenericPlugin::getNbOutputs() const TRT_NOEXCEPT {
int res = 0;
for (auto& i : op_desc_.Outputs()) {
if (!i.second.empty()) res += i.second.size();
}
return res;
}
int GenericPlugin::getNbInputs() const TRT_NOEXCEPT {
int res = 0;
for (auto& i : op_desc_.Inputs()) {
if (!i.second.empty()) res += i.second.size();
}
return res;
}
nvinfer1::IPluginV2DynamicExt* GenericPlugin::clone() const TRT_NOEXCEPT {
nvinfer1::IPluginV2DynamicExt* plugin = new GenericPlugin(
proto_op_desc_, inputs_data_type_, outputs_data_type_, with_fp16_);
plugin->initialize();
return plugin;
}
void GenericPlugin::serialize(void* buffer) const TRT_NOEXCEPT {
// inputs_data_type_
SerializeValue(&buffer, inputs_data_type_);
// outputs_data_type_
SerializeValue(&buffer, outputs_data_type_);
// use fp16
SerializeValue(&buffer, with_fp16_);
// serialize op_meta_data_
std::memcpy(buffer, op_meta_data_.c_str(), op_meta_data_.size());
reinterpret_cast<char*&>(buffer) += op_meta_data_.size();
}
bool GenericPlugin::supportsFormatCombination(
int pos,
const nvinfer1::PluginTensorDesc* in_out,
int nb_inputs,
int nb_outputs) TRT_NOEXCEPT {
if (op_desc_.Type() == "gather_nd" || op_desc_.Type() == "yolo_box") {
if (pos == 0)
return (in_out[pos].type == nvinfer1::DataType::kFLOAT ||
(isFp16Supported() &&
in_out[pos].type == nvinfer1::DataType::kHALF)) &&
(in_out[pos].format == nvinfer1::TensorFormat::kLINEAR);
if (pos == 1)
return (in_out[pos].type == nvinfer1::DataType::kINT32) &&
(in_out[pos].format == nvinfer1::TensorFormat::kLINEAR);
// output
if (pos == 2)
return in_out[0].type == in_out[pos].type &&
in_out[0].format == in_out[pos].format;
} else if (op_desc_.Type() == "scatter_nd_add") {
// input X
if (pos == 0)
return (in_out[pos].type == nvinfer1::DataType::kFLOAT ||
(isFp16Supported() &&
in_out[pos].type == nvinfer1::DataType::kHALF)) &&
(in_out[pos].format == nvinfer1::TensorFormat::kLINEAR);
// input Index
if (pos == 1)
return (in_out[pos].type == nvinfer1::DataType::kINT32) &&
(in_out[pos].format == nvinfer1::TensorFormat::kLINEAR);
// input Updates
if (pos == 2)
return (in_out[pos].type == nvinfer1::DataType::kFLOAT ||
(isFp16Supported() &&
in_out[pos].type == nvinfer1::DataType::kHALF)) &&
(in_out[pos].format == nvinfer1::TensorFormat::kLINEAR);
// output
if (pos == 3)
return in_out[0].type == in_out[pos].type &&
in_out[0].format == in_out[pos].format;
} else if (op_desc_.Type() == "lookup_table_v2") {
if (pos == 0)
return (in_out[pos].type == nvinfer1::DataType::kINT32 &&
(in_out[pos].format == nvinfer1::TensorFormat::kLINEAR));
if (pos == 1)
return (in_out[pos].type == nvinfer1::DataType::kFLOAT ||
(isFp16Supported() &&
in_out[pos].type == nvinfer1::DataType::kHALF)) &&
(in_out[pos].format == nvinfer1::TensorFormat::kLINEAR);
// output
if (pos == 2)
return in_out[1].type == in_out[pos].type &&
in_out[1].format == in_out[pos].format;
} else {
return (in_out[pos].type == nvinfer1::DataType::kFLOAT ||
(isFp16Supported() &&
in_out[pos].type == nvinfer1::DataType::kHALF)) &&
(in_out[pos].format == nvinfer1::TensorFormat::kLINEAR) &&
(in_out[0].type == in_out[pos].type);
}
}
nvinfer1::DataType GenericPlugin::getOutputDataType(
int index,
const nvinfer1::DataType* input_types,
int nb_inputs) const TRT_NOEXCEPT {
if (op_desc_.Type() == "lookup_table_v2") {
return input_types[1];
}
return input_types[0];
}
int GenericPlugin::initialize() TRT_NOEXCEPT {
std::string op_type = op_desc_.Type();
phi::KernelSignature phi_kernel_signature;
if (phi::OpUtilsMap::Instance().HasArgumentMappingFn(op_type)) {
const phi::ArgumentMappingFn* argument_mapping_func =
phi::OpUtilsMap::Instance().GetArgumentMappingFn(op_type);
PluginArgumentMappingContext argument_mapping_context(&op_desc_);
phi_kernel_signature = (*argument_mapping_func)(argument_mapping_context);
} else {
phi_kernel_signature =
phi::DefaultKernelSignatureMap::Instance().Get(op_type);
}
PADDLE_ENFORCE_EQ(
phi::KernelFactory::Instance().HasCompatiblePhiKernel(op_type),
true,
platform::errors::Fatal("%s has no compatible phi kernel!",
op_type.c_str()));
paddle::platform::DeviceContextPool& pool =
paddle::platform::DeviceContextPool::Instance();
platform::CUDAPlace place(platform::GetCurrentDeviceId());
auto* dev_ctx = static_cast<phi::GPUContext*>(pool.Get(place));
std::vector<phi::DataType> precision_types{phi::DataType::FLOAT32,
phi::DataType::FLOAT16};
for (auto& precision_type : precision_types) {
phi::KernelKey phi_kernel_key(
phi::Backend::GPU, phi::DataLayout::ANY, precision_type);
auto nv_dtype = PhiType2NvType(precision_type);
phi_kernels_[nv_dtype] = std::make_unique<phi::Kernel>(
phi::KernelFactory::Instance().SelectKernel(phi_kernel_signature.name,
phi_kernel_key));
if (phi_kernel_contexts_.find(nv_dtype) == phi_kernel_contexts_.end() ||
!phi_kernel_contexts_[nv_dtype]) {
phi_kernel_contexts_[nv_dtype] =
std::make_unique<phi::KernelContext>(dev_ctx);
BuildPhiKernelContextAttr(op_desc_,
phi_kernel_contexts_[nv_dtype].get(),
phi_kernel_signature,
phi_kernels_[nv_dtype].get());
}
}
PADDLE_ENFORCE_EQ(phi_kernels_[nvinfer1::DataType::kFLOAT]->IsValid() ||
phi_kernels_[nvinfer1::DataType::kHALF]->IsValid(),
true,
platform::errors::Fatal("%s phi kernel is invalid!.",
phi_kernel_signature.name));
if (!dense_tensor_inputs_)
dense_tensor_inputs_ = new std::vector<phi::DenseTensor>(getNbInputs());
if (!dense_tensor_outputs_)
dense_tensor_outputs_ = new std::vector<phi::DenseTensor>(getNbOutputs());
return 0;
}
nvinfer1::DimsExprs GenericPlugin::getOutputDimensions(
int output_index,
const nvinfer1::DimsExprs* inputs,
int nb_inputs,
nvinfer1::IExprBuilder& expr_builder) TRT_NOEXCEPT {
CHECK(output_index < getNbOutputs());
auto& dynamic_infermeta_factory = tensorrt::DynamicMetaFnFactory::Instance();
PADDLE_ENFORCE_EQ(dynamic_infermeta_factory.Contains(op_desc_.Type()),
true,
platform::errors::InvalidArgument(
"The %s op has no dynamic plugin infershape function!",
op_desc_.Type().c_str()));
auto* infershape_func = dynamic_infermeta_factory.Get(op_desc_.Type());
return infershape_func(
output_index, inputs, nb_inputs, expr_builder, op_desc_);
}
void GenericPlugin::configurePlugin(
const nvinfer1::DynamicPluginTensorDesc* in,
int nb_inputs,
const nvinfer1::DynamicPluginTensorDesc* out,
int nb_outputs) TRT_NOEXCEPT {
CHECK(phi_kernels_[nvinfer1::DataType::kFLOAT]->IsValid() ||
phi_kernels_[nvinfer1::DataType::kHALF]->IsValid());
CHECK(nb_inputs == getNbInputs());
CHECK(nb_outputs == getNbOutputs());
}
// Shutdown the layer. This is called when the engine is destroyed
void GenericPlugin::terminate() TRT_NOEXCEPT {
delete dense_tensor_inputs_;
delete dense_tensor_outputs_;
}
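// Wraps the raw TensorRT device buffers into phi::DenseTensors (no copy) and
// runs the phi kernel selected for the active precision.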
int GenericPlugin::enqueue(const nvinfer1::PluginTensorDesc* input_desc,
const nvinfer1::PluginTensorDesc* output_desc,
const void* const* inputs,
void* const* outputs,
void* workspace,
hipStream_t stream) TRT_NOEXCEPT {
platform::CUDAPlace place(platform::GetCurrentDeviceId());
// TODO(inference): generic plugin does not support INT8 precision yet.
auto protoType2PhiType =
[&](int proto_type,
nvinfer1::DataType nv_dtype) -> std::pair<phi::DataType, int> {
if (proto_type ==
static_cast<int>(framework::proto::VarType_Type::VarType_Type_FP16)) {
return {phi::DataType::FLOAT16, sizeof(half)};
} else if (proto_type ==
static_cast<int>(
framework::proto::VarType_Type::VarType_Type_FP32)) {
if (isFp16Supported() && nv_dtype == nvinfer1::DataType::kHALF) {
return {phi::DataType::FLOAT16, sizeof(half)};
} else {
return {phi::DataType::FLOAT32, sizeof(float)};
}
} else if (proto_type ==
static_cast<int>(
framework::proto::VarType_Type::VarType_Type_INT64) ||
proto_type ==
static_cast<int>(
framework::proto::VarType_Type::VarType_Type_INT32)) {
return {phi::DataType::INT32, sizeof(int32_t)};
} else if (proto_type ==
static_cast<int>(
framework::proto::VarType_Type::VarType_Type_BOOL)) {
return {phi::DataType::BOOL, sizeof(bool)};
} else {
CHECK(false) << "precision is not supported";
}
};
nvinfer1::DataType data_type;
// input
if (op_desc_.Type() == "lookup_table_v2") {
data_type = input_desc[1].type;
} else {
data_type = input_desc[0].type;
}
CHECK((data_type == nvinfer1::DataType::kFLOAT) ||
(data_type == nvinfer1::DataType::kHALF));
phi_kernel_contexts_[data_type]->ClearInputOutput();
for (int i = 0; i < getNbInputs(); i++) {
auto const& input_dims = input_desc[i].dims;
std::vector<int> input_shape;
for (int j = 0; j < input_dims.nbDims; j++)
input_shape.push_back(input_dims.d[j]);
int input_numel = 1;
for (int k = 0; k < input_shape.size(); k++) input_numel *= input_shape[k];
auto data_type_and_size =
protoType2PhiType(inputs_data_type_[i], data_type);
phi::DenseTensorMeta input_meta(data_type_and_size.first,
phi::make_ddim(input_shape));
std::shared_ptr<phi::Allocation> input_alloc(
new phi::Allocation((void*)(inputs[i]), // NOLINT
input_numel * data_type_and_size.second,
place));
(*dense_tensor_inputs_)[i] =
std::move(phi::DenseTensor(input_alloc, input_meta));
phi_kernel_contexts_[data_type]->EmplaceBackInput(
&((*dense_tensor_inputs_)[i]));
}
// output
for (int i = 0; i < getNbOutputs(); i++) {
auto const& output_dims = output_desc[i].dims;
std::vector<int> output_shape;
for (int j = 0; j < output_dims.nbDims; j++)
output_shape.push_back(output_dims.d[j]);
int output_numel = 1;
for (int k = 0; k < output_shape.size(); k++)
output_numel *= output_shape[k];
auto data_type_and_size =
protoType2PhiType(outputs_data_type_[i], data_type);
phi::DenseTensorMeta output_meta(data_type_and_size.first,
phi::make_ddim(output_shape));
std::shared_ptr<phi::Allocation> output_alloc(
new phi::Allocation(reinterpret_cast<void*>(outputs[i]),
output_numel * data_type_and_size.second,
place));
(*dense_tensor_outputs_)[i] =
std::move(phi::DenseTensor(output_alloc, output_meta));
phi_kernel_contexts_[data_type]->EmplaceBackOutput(
&((*dense_tensor_outputs_)[i]));
}
CHECK_EQ(phi_kernel_contexts_[data_type]->InputsSize(), getNbInputs());
CHECK_EQ(phi_kernel_contexts_[data_type]->OutputsSize(), getNbOutputs());
(*phi_kernels_[data_type])(phi_kernel_contexts_[data_type].get());
return hipGetLastError() != hipSuccess;
}
} // namespace plugin
} // namespace tensorrt
} // namespace inference
} // namespace paddle
| 13d6f15979f592dd41c542599243719cffb84932.cu | // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/inference/tensorrt/plugin/generic_plugin.h"
#include "paddle/fluid/framework/framework.pb.h"
#include "paddle/fluid/framework/op_kernel_type.h"
#include "paddle/fluid/framework/phi_utils.h"
#include "paddle/fluid/inference/tensorrt/dynamic_shape_infermeta_registry.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/common/data_type.h"
#include "paddle/phi/core/compat/op_utils.h"
#include "paddle/phi/core/kernel_context.h"
#include "paddle/phi/core/kernel_factory.h"
namespace paddle {
namespace inference {
namespace tensorrt {
namespace plugin {
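// Translates the op's proto attributes (read from op_desc) into phi kernel
// attributes, following the order and types declared by the kernel signature.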
void BuildPhiKernelContextAttr(const framework::OpDesc& op_desc,
phi::KernelContext* kernel_context,
const phi::KernelSignature& signature,
const phi::Kernel* phi_kernel) {
if (!phi_kernel->IsValid()) {
return;
}
const phi::KernelArgsDef& args_def = phi_kernel->args_def();
const auto& attr_names = signature.attr_names;
const auto& attr_defs = args_def.attribute_defs();
PADDLE_ENFORCE_EQ(
attr_names.size(),
attr_defs.size(),
platform::errors::InvalidArgument(
"The attr_names.size() should be equal to attr_defs.size()."));
framework::AttrReader attr_reader(op_desc.GetAttrMap());
for (size_t k = 0; k < attr_names.size(); ++k) {
auto attr_name = attr_names[k];
auto* attr_ptr = attr_reader.GetAttr(attr_name);
if (attr_ptr) {
switch (attr_defs[k].type_index) {
case phi::AttributeType::SCALAR: {
auto& attr = *attr_ptr;
switch (AttrTypeID(attr)) {
case framework::proto::AttrType::FLOAT:
kernel_context->EmplaceBackAttr(
phi::Scalar(PADDLE_GET_CONST(float, attr)));
break;
case framework::proto::AttrType::FLOAT64:
kernel_context->EmplaceBackAttr(
phi::Scalar(PADDLE_GET_CONST(double, attr)));
break;
case framework::proto::AttrType::INT:
kernel_context->EmplaceBackAttr(
phi::Scalar(PADDLE_GET_CONST(int, attr)));
break;
case framework::proto::AttrType::LONG:
kernel_context->EmplaceBackAttr(
phi::Scalar(PADDLE_GET_CONST(int64_t, attr)));
break;
case framework::proto::AttrType::STRING:
kernel_context->EmplaceBackAttr(
phi::Scalar(PADDLE_GET_CONST(std::string, attr)));
break;
case framework::proto::AttrType::SCALAR:
kernel_context->EmplaceBackAttr(phi::Scalar(
PADDLE_GET_CONST(paddle::experimental::Scalar, attr)));
break;
default:
PADDLE_THROW(platform::errors::Unimplemented(
"Unsupported cast op attribute `%s` to Scalar when "
"ProtoAttr2PhiAttr.",
attr_name));
}
} break;
case phi::AttributeType::INT_ARRAY: {
auto& attr = *attr_ptr;
switch (AttrTypeID(attr)) {
case framework::proto::AttrType::INTS:
kernel_context->EmplaceBackAttr(std::move(
phi::IntArray(PADDLE_GET_CONST(std::vector<int32_t>, attr))));
break;
case framework::proto::AttrType::LONGS:
kernel_context->EmplaceBackAttr(std::move(
phi::IntArray(PADDLE_GET_CONST(std::vector<int64_t>, attr))));
break;
case framework::proto::AttrType::INT:
kernel_context->EmplaceBackAttr(
phi::IntArray({PADDLE_GET_CONST(int, attr)}));
break;
default:
PADDLE_THROW(platform::errors::Unimplemented(
"Unsupported cast op attribute `%s` to IntArray when "
"ProtoAttr2PhiAttr.",
attr_name));
}
} break;
case phi::AttributeType::SCALARS: {
auto& attr = *attr_ptr;
switch (AttrTypeID(attr)) {
case framework::proto::AttrType::INTS: {
const auto& vec = PADDLE_GET_CONST(std::vector<int32_t>, attr);
std::vector<phi::Scalar> scalar_list;
scalar_list.reserve(vec.size());
for (const auto& val : vec) {
scalar_list.emplace_back(val);
}
kernel_context->EmplaceBackAttr(std::move(scalar_list));
} break;
case framework::proto::AttrType::LONGS: {
const auto& vec = PADDLE_GET_CONST(std::vector<int64_t>, attr);
std::vector<phi::Scalar> scalar_list;
scalar_list.reserve(vec.size());
for (const auto& val : vec) {
scalar_list.emplace_back(val);
}
kernel_context->EmplaceBackAttr(std::move(scalar_list));
} break;
case framework::proto::AttrType::FLOATS: {
const auto& vec = PADDLE_GET_CONST(std::vector<float>, attr);
std::vector<phi::Scalar> scalar_list;
scalar_list.reserve(vec.size());
for (const auto& val : vec) {
scalar_list.emplace_back(val);
}
kernel_context->EmplaceBackAttr(std::move(scalar_list));
} break;
case framework::proto::AttrType::FLOAT64S: {
const auto& vec = PADDLE_GET_CONST(std::vector<double>, attr);
std::vector<phi::Scalar> scalar_list;
scalar_list.reserve(vec.size());
for (const auto& val : vec) {
scalar_list.emplace_back(val);
}
kernel_context->EmplaceBackAttr(std::move(scalar_list));
} break;
case framework::proto::AttrType::SCALARS: {
const auto& vec = PADDLE_GET_CONST(
std::vector<paddle::experimental::Scalar>, attr);
std::vector<phi::Scalar> scalar_list;
scalar_list.reserve(vec.size());
for (const auto& val : vec) {
scalar_list.emplace_back(val);
}
kernel_context->EmplaceBackAttr(std::move(scalar_list));
} break;
default:
PADDLE_THROW(platform::errors::Unimplemented(
"Unsupported cast op attribute `%s` to vector<Scalar> when "
"ProtoAttr2PhiAttr.",
attr_name));
}
} break;
default: {
auto& attr = *attr_ptr;
switch (attr_defs[k].type_index) {
case phi::AttributeType::FLOAT32:
kernel_context->EmplaceBackAttr(PADDLE_GET_CONST(float, attr));
break;
case phi::AttributeType::INT32:
kernel_context->EmplaceBackAttr(PADDLE_GET_CONST(int, attr));
break;
case phi::AttributeType::BOOL:
kernel_context->EmplaceBackAttr(PADDLE_GET_CONST(bool, attr));
break;
case phi::AttributeType::INT64:
kernel_context->EmplaceBackAttr(PADDLE_GET_CONST(int64_t, attr));
break;
case phi::AttributeType::INT32S:
kernel_context->EmplaceBackAttr(
PADDLE_GET_CONST(std::vector<int>, attr));
break;
case phi::AttributeType::DATA_TYPE: {
auto data_type = paddle::framework::TransToPhiDataType(
static_cast<framework::proto::VarType::Type>(
PADDLE_GET_CONST(int, attr)));
kernel_context->EmplaceBackAttr(data_type);
} break;
case phi::AttributeType::STRING:
kernel_context->EmplaceBackAttr(
PADDLE_GET_CONST(std::string, attr));
break;
case phi::AttributeType::INT64S:
switch (AttrTypeID(attr)) {
case framework::proto::AttrType::LONGS:
kernel_context->EmplaceBackAttr(
PADDLE_GET_CONST(std::vector<int64_t>, attr));
break;
case framework::proto::AttrType::INTS: {
const auto& vector_int_attr =
PADDLE_GET_CONST(std::vector<int>, attr);
const std::vector<int64_t> vector_int64_attr(
vector_int_attr.begin(), vector_int_attr.end());
kernel_context->EmplaceBackAttr(vector_int64_attr);
} break;
default:
PADDLE_THROW(platform::errors::Unimplemented(
"Unsupported cast op attribute `%s` to vector<int64_t> "
"when ProtoAttr2PhiAttr.",
attr_name));
}
break;
case phi::AttributeType::FLOAT32S:
kernel_context->EmplaceBackAttr(
PADDLE_GET_CONST(std::vector<float>, attr));
break;
case phi::AttributeType::STRINGS:
kernel_context->EmplaceBackAttr(
PADDLE_GET_CONST(std::vector<std::string>, attr));
break;
case phi::AttributeType::BOOLS:
kernel_context->EmplaceBackAttr(
PADDLE_GET_CONST(std::vector<bool>, attr));
break;
case phi::AttributeType::FLOAT64S:
kernel_context->EmplaceBackAttr(
PADDLE_GET_CONST(std::vector<double>, attr));
break;
default:
PADDLE_THROW(platform::errors::Unimplemented(
"Unsupported cast op attribute `%s` when construct "
"ProtoAttr2PhiAttr.",
attr_name));
}
}
}
}
}
CHECK_EQ(attr_names.size(), kernel_context->AttrsSize());
}
GenericPlugin::GenericPlugin(
const paddle::framework::proto::OpDesc& proto_op_desc,
const InputOutPutVarInfo& in_out_info,
bool with_fp16) {
proto_op_desc_ = proto_op_desc;
op_desc_ = std::move(framework::OpDesc(proto_op_desc_, nullptr));
proto_op_desc_.SerializeToString(&op_meta_data_);
inputs_data_type_ = in_out_info.inputs_data_type;
outputs_data_type_ = in_out_info.outputs_data_type;
with_fp16_ = with_fp16;
}
GenericPlugin::GenericPlugin(
const paddle::framework::proto::OpDesc& proto_op_desc,
const std::vector<int>& inputs_data_type,
const std::vector<int>& outputs_data_type,
bool with_fp16) {
proto_op_desc_ = proto_op_desc;
op_desc_ = std::move(framework::OpDesc(proto_op_desc_, nullptr));
proto_op_desc_.SerializeToString(&op_meta_data_);
inputs_data_type_ = inputs_data_type;
outputs_data_type_ = outputs_data_type;
with_fp16_ = with_fp16;
}
GenericPlugin::GenericPlugin(void const* serial_data, size_t serial_length) {
DeserializeValue(&serial_data, &serial_length, &inputs_data_type_);
DeserializeValue(&serial_data, &serial_length, &outputs_data_type_);
DeserializeValue(&serial_data, &serial_length, &with_fp16_);
std::string op_meta_data((char*)(serial_data), serial_length); // NOLINT
op_meta_data_ = std::move(op_meta_data);
proto_op_desc_.ParseFromString(op_meta_data_);
op_desc_ = std::move(framework::OpDesc(proto_op_desc_, nullptr));
}
int GenericPlugin::getNbOutputs() const TRT_NOEXCEPT {
int res = 0;
for (auto& i : op_desc_.Outputs()) {
if (!i.second.empty()) res += i.second.size();
}
return res;
}
int GenericPlugin::getNbInputs() const TRT_NOEXCEPT {
int res = 0;
for (auto& i : op_desc_.Inputs()) {
if (!i.second.empty()) res += i.second.size();
}
return res;
}
nvinfer1::IPluginV2DynamicExt* GenericPlugin::clone() const TRT_NOEXCEPT {
nvinfer1::IPluginV2DynamicExt* plugin = new GenericPlugin(
proto_op_desc_, inputs_data_type_, outputs_data_type_, with_fp16_);
plugin->initialize();
return plugin;
}
void GenericPlugin::serialize(void* buffer) const TRT_NOEXCEPT {
// inputs_data_type_
SerializeValue(&buffer, inputs_data_type_);
// outputs_data_type_
SerializeValue(&buffer, outputs_data_type_);
// use fp16
SerializeValue(&buffer, with_fp16_);
// serialize op_meta_data_
std::memcpy(buffer, op_meta_data_.c_str(), op_meta_data_.size());
reinterpret_cast<char*&>(buffer) += op_meta_data_.size();
}
bool GenericPlugin::supportsFormatCombination(
int pos,
const nvinfer1::PluginTensorDesc* in_out,
int nb_inputs,
int nb_outputs) TRT_NOEXCEPT {
if (op_desc_.Type() == "gather_nd" || op_desc_.Type() == "yolo_box") {
if (pos == 0)
return (in_out[pos].type == nvinfer1::DataType::kFLOAT ||
(isFp16Supported() &&
in_out[pos].type == nvinfer1::DataType::kHALF)) &&
(in_out[pos].format == nvinfer1::TensorFormat::kLINEAR);
if (pos == 1)
return (in_out[pos].type == nvinfer1::DataType::kINT32) &&
(in_out[pos].format == nvinfer1::TensorFormat::kLINEAR);
// output
if (pos == 2)
return in_out[0].type == in_out[pos].type &&
in_out[0].format == in_out[pos].format;
} else if (op_desc_.Type() == "scatter_nd_add") {
// input X
if (pos == 0)
return (in_out[pos].type == nvinfer1::DataType::kFLOAT ||
(isFp16Supported() &&
in_out[pos].type == nvinfer1::DataType::kHALF)) &&
(in_out[pos].format == nvinfer1::TensorFormat::kLINEAR);
// input Index
if (pos == 1)
return (in_out[pos].type == nvinfer1::DataType::kINT32) &&
(in_out[pos].format == nvinfer1::TensorFormat::kLINEAR);
// input Updates
if (pos == 2)
return (in_out[pos].type == nvinfer1::DataType::kFLOAT ||
(isFp16Supported() &&
in_out[pos].type == nvinfer1::DataType::kHALF)) &&
(in_out[pos].format == nvinfer1::TensorFormat::kLINEAR);
// output
if (pos == 3)
return in_out[0].type == in_out[pos].type &&
in_out[0].format == in_out[pos].format;
} else if (op_desc_.Type() == "lookup_table_v2") {
if (pos == 0)
return (in_out[pos].type == nvinfer1::DataType::kINT32 &&
(in_out[pos].format == nvinfer1::TensorFormat::kLINEAR));
if (pos == 1)
return (in_out[pos].type == nvinfer1::DataType::kFLOAT ||
(isFp16Supported() &&
in_out[pos].type == nvinfer1::DataType::kHALF)) &&
(in_out[pos].format == nvinfer1::TensorFormat::kLINEAR);
// output
if (pos == 2)
return in_out[1].type == in_out[pos].type &&
in_out[1].format == in_out[pos].format;
} else {
return (in_out[pos].type == nvinfer1::DataType::kFLOAT ||
(isFp16Supported() &&
in_out[pos].type == nvinfer1::DataType::kHALF)) &&
(in_out[pos].format == nvinfer1::TensorFormat::kLINEAR) &&
(in_out[0].type == in_out[pos].type);
}
}
nvinfer1::DataType GenericPlugin::getOutputDataType(
int index,
const nvinfer1::DataType* input_types,
int nb_inputs) const TRT_NOEXCEPT {
if (op_desc_.Type() == "lookup_table_v2") {
return input_types[1];
}
return input_types[0];
}
int GenericPlugin::initialize() TRT_NOEXCEPT {
std::string op_type = op_desc_.Type();
phi::KernelSignature phi_kernel_signature;
if (phi::OpUtilsMap::Instance().HasArgumentMappingFn(op_type)) {
const phi::ArgumentMappingFn* argument_mapping_func =
phi::OpUtilsMap::Instance().GetArgumentMappingFn(op_type);
PluginArgumentMappingContext argument_mapping_context(&op_desc_);
phi_kernel_signature = (*argument_mapping_func)(argument_mapping_context);
} else {
phi_kernel_signature =
phi::DefaultKernelSignatureMap::Instance().Get(op_type);
}
PADDLE_ENFORCE_EQ(
phi::KernelFactory::Instance().HasCompatiblePhiKernel(op_type),
true,
platform::errors::Fatal("%s has no compatible phi kernel!",
op_type.c_str()));
paddle::platform::DeviceContextPool& pool =
paddle::platform::DeviceContextPool::Instance();
platform::CUDAPlace place(platform::GetCurrentDeviceId());
auto* dev_ctx = static_cast<phi::GPUContext*>(pool.Get(place));
std::vector<phi::DataType> precision_types{phi::DataType::FLOAT32,
phi::DataType::FLOAT16};
for (auto& precision_type : precision_types) {
phi::KernelKey phi_kernel_key(
phi::Backend::GPU, phi::DataLayout::ANY, precision_type);
auto nv_dtype = PhiType2NvType(precision_type);
phi_kernels_[nv_dtype] = std::make_unique<phi::Kernel>(
phi::KernelFactory::Instance().SelectKernel(phi_kernel_signature.name,
phi_kernel_key));
if (phi_kernel_contexts_.find(nv_dtype) == phi_kernel_contexts_.end() ||
!phi_kernel_contexts_[nv_dtype]) {
phi_kernel_contexts_[nv_dtype] =
std::make_unique<phi::KernelContext>(dev_ctx);
BuildPhiKernelContextAttr(op_desc_,
phi_kernel_contexts_[nv_dtype].get(),
phi_kernel_signature,
phi_kernels_[nv_dtype].get());
}
}
PADDLE_ENFORCE_EQ(phi_kernels_[nvinfer1::DataType::kFLOAT]->IsValid() ||
phi_kernels_[nvinfer1::DataType::kHALF]->IsValid(),
true,
platform::errors::Fatal("%s phi kernel is invalid!.",
phi_kernel_signature.name));
if (!dense_tensor_inputs_)
dense_tensor_inputs_ = new std::vector<phi::DenseTensor>(getNbInputs());
if (!dense_tensor_outputs_)
dense_tensor_outputs_ = new std::vector<phi::DenseTensor>(getNbOutputs());
return 0;
}
nvinfer1::DimsExprs GenericPlugin::getOutputDimensions(
int output_index,
const nvinfer1::DimsExprs* inputs,
int nb_inputs,
nvinfer1::IExprBuilder& expr_builder) TRT_NOEXCEPT {
CHECK(output_index < getNbOutputs());
auto& dynamic_infermeta_factory = tensorrt::DynamicMetaFnFactory::Instance();
PADDLE_ENFORCE_EQ(dynamic_infermeta_factory.Contains(op_desc_.Type()),
true,
platform::errors::InvalidArgument(
"The %s op has no dynamic plugin infershape function!",
op_desc_.Type().c_str()));
auto* infershape_func = dynamic_infermeta_factory.Get(op_desc_.Type());
return infershape_func(
output_index, inputs, nb_inputs, expr_builder, op_desc_);
}
void GenericPlugin::configurePlugin(
const nvinfer1::DynamicPluginTensorDesc* in,
int nb_inputs,
const nvinfer1::DynamicPluginTensorDesc* out,
int nb_outputs) TRT_NOEXCEPT {
CHECK(phi_kernels_[nvinfer1::DataType::kFLOAT]->IsValid() ||
phi_kernels_[nvinfer1::DataType::kHALF]->IsValid());
CHECK(nb_inputs == getNbInputs());
CHECK(nb_outputs == getNbOutputs());
}
// Shutdown the layer. This is called when the engine is destroyed
void GenericPlugin::terminate() TRT_NOEXCEPT {
delete dense_tensor_inputs_;
delete dense_tensor_outputs_;
}
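// Wraps the raw TensorRT device buffers into phi::DenseTensors (no copy) and
// runs the phi kernel selected for the active precision.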
int GenericPlugin::enqueue(const nvinfer1::PluginTensorDesc* input_desc,
const nvinfer1::PluginTensorDesc* output_desc,
const void* const* inputs,
void* const* outputs,
void* workspace,
cudaStream_t stream) TRT_NOEXCEPT {
platform::CUDAPlace place(platform::GetCurrentDeviceId());
// TODO(inference): generic plugin does not support INT8 precision yet.
auto protoType2PhiType =
[&](int proto_type,
nvinfer1::DataType nv_dtype) -> std::pair<phi::DataType, int> {
if (proto_type ==
static_cast<int>(framework::proto::VarType_Type::VarType_Type_FP16)) {
return {phi::DataType::FLOAT16, sizeof(half)};
} else if (proto_type ==
static_cast<int>(
framework::proto::VarType_Type::VarType_Type_FP32)) {
if (isFp16Supported() && nv_dtype == nvinfer1::DataType::kHALF) {
return {phi::DataType::FLOAT16, sizeof(half)};
} else {
return {phi::DataType::FLOAT32, sizeof(float)};
}
} else if (proto_type ==
static_cast<int>(
framework::proto::VarType_Type::VarType_Type_INT64) ||
proto_type ==
static_cast<int>(
framework::proto::VarType_Type::VarType_Type_INT32)) {
return {phi::DataType::INT32, sizeof(int32_t)};
} else if (proto_type ==
static_cast<int>(
framework::proto::VarType_Type::VarType_Type_BOOL)) {
return {phi::DataType::BOOL, sizeof(bool)};
} else {
CHECK(false) << "precision is not supported";
}
};
nvinfer1::DataType data_type;
// input
if (op_desc_.Type() == "lookup_table_v2") {
data_type = input_desc[1].type;
} else {
data_type = input_desc[0].type;
}
CHECK((data_type == nvinfer1::DataType::kFLOAT) ||
(data_type == nvinfer1::DataType::kHALF));
phi_kernel_contexts_[data_type]->ClearInputOutput();
for (int i = 0; i < getNbInputs(); i++) {
auto const& input_dims = input_desc[i].dims;
std::vector<int> input_shape;
for (int j = 0; j < input_dims.nbDims; j++)
input_shape.push_back(input_dims.d[j]);
int input_numel = 1;
for (int k = 0; k < input_shape.size(); k++) input_numel *= input_shape[k];
auto data_type_and_size =
protoType2PhiType(inputs_data_type_[i], data_type);
phi::DenseTensorMeta input_meta(data_type_and_size.first,
phi::make_ddim(input_shape));
std::shared_ptr<phi::Allocation> input_alloc(
new phi::Allocation((void*)(inputs[i]), // NOLINT
input_numel * data_type_and_size.second,
place));
(*dense_tensor_inputs_)[i] =
std::move(phi::DenseTensor(input_alloc, input_meta));
phi_kernel_contexts_[data_type]->EmplaceBackInput(
&((*dense_tensor_inputs_)[i]));
}
// output
for (int i = 0; i < getNbOutputs(); i++) {
auto const& output_dims = output_desc[i].dims;
std::vector<int> output_shape;
for (int j = 0; j < output_dims.nbDims; j++)
output_shape.push_back(output_dims.d[j]);
int output_numel = 1;
for (int k = 0; k < output_shape.size(); k++)
output_numel *= output_shape[k];
auto data_type_and_size =
protoType2PhiType(outputs_data_type_[i], data_type);
phi::DenseTensorMeta output_meta(data_type_and_size.first,
phi::make_ddim(output_shape));
std::shared_ptr<phi::Allocation> output_alloc(
new phi::Allocation(reinterpret_cast<void*>(outputs[i]),
output_numel * data_type_and_size.second,
place));
(*dense_tensor_outputs_)[i] =
std::move(phi::DenseTensor(output_alloc, output_meta));
phi_kernel_contexts_[data_type]->EmplaceBackOutput(
&((*dense_tensor_outputs_)[i]));
}
CHECK_EQ(phi_kernel_contexts_[data_type]->InputsSize(), getNbInputs());
CHECK_EQ(phi_kernel_contexts_[data_type]->OutputsSize(), getNbOutputs());
(*phi_kernels_[data_type])(phi_kernel_contexts_[data_type].get());
return cudaGetLastError() != cudaSuccess;
}
} // namespace plugin
} // namespace tensorrt
} // namespace inference
} // namespace paddle
|
8adddad46bd6aa5372e1fb9b349d7e25af00bb1c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include <hip/hip_fp16.h>
#include <stdint.h> // uint8_t
#include <cassert>
// ====================
// Greedy search
// ====================
// Implementing vanilla greedy search in the device
__global__ void greedySearch_ker(int32_t* symbolArr,
int32_t* num_symbols_current_step,
int32_t* encIdx,
int32_t* seqLen,
bool* isNotBlank,
int32_t* outSeq,
int32_t* outSeqLen,
int32_t* done,
int batchSize,
int iter,
int _BLANK_,
int hp_max_symbols_per_step,
int maxLength) {
int bs = blockIdx.x * blockDim.x + threadIdx.x;
if (bs >= batchSize) return;
/* for (size_t bs = 0; bs < actualBatchSize ; bs++)
{
// Get current winner symbol
int32_t winner_symbol = symbolArr[bs];
// Update state based on the outcome
if(winner_symbol != _BLANK_ && num_symbols_current_step[bs] < FLAGS_hp_max_symbols_per_step)
{
isNotBlank[bs] = true;
lastSymbol[bs] = winner_symbol;
// Note that here we do not update the time pointer because
// we want to generate more symbols from the predictor
// FLAGS_always_advance_time: update time pointers as a hack if we do not get real data
if(FLAGS_always_advance_time)
{
if(encIdx[bs] < seqLen[bs])
{
encIdx[bs]++;
num_symbols_current_step[bs] = 0;
}
}
// Update output
outSeq[bs].push(winner_symbol);
num_symbols_current_step[bs]++;
// std::cout << "[blank=false] BS : " << bs << " t=" << encIdx[bs] << " winner symbol:[" << winner_symbol << "]" << std::endl;
// outSeq[bs].print("output");
}
else // winner_symbol == _BLANK_
{
isNotBlank[bs] = false;
// Note that here we do not update the inputs to the predictor
// because we will do the prediction again (brute force) and we
// want the same outcome
// update time pointer
if(encIdx[bs] < seqLen[bs])
{
encIdx[bs]++;
num_symbols_current_step[bs] = 0;
}
}
if(encIdx[bs] < seqLen[bs]) done = false;
} */
// Get current winner symbol
int32_t winner_symbol = symbolArr[bs];
// Update state based on the outcome
// if(winner_symbol != _BLANK_ && num_symbols_current_step[bs] < hp_max_symbols_per_step)
if(winner_symbol != _BLANK_ && num_symbols_current_step[bs] < hp_max_symbols_per_step && (encIdx[bs] < seqLen[bs]))
{
isNotBlank[bs] = true;
// Note that here we do not update the time pointer because
// we want to generate more symbols from the predictor
// Update output
outSeq[bs * maxLength + outSeqLen[bs]] = winner_symbol; // TODO: Fix bad access pattern
outSeqLen[bs]++;
num_symbols_current_step[bs]++;
// std::cout << "[blank=false] BS : " << bs << " t=" << encIdx[bs] << " winner symbol:[" << winner_symbol << "]" << std::endl;
// outSeq[bs].print("output");
}
else // winner_symbol == _BLANK_
{
isNotBlank[bs] = false;
// Note that here we do not update the inputs to the predictor
// because we will do the prediction again (brute force) and we
// want the same outcome
// update time pointer
if(encIdx[bs] < seqLen[bs]) {
encIdx[bs]++;
num_symbols_current_step[bs] = 0;
}
}
// check if these batch instance is done (consumer the whole encoder input)
// TODO: Hammering *done from many threads in a block. Maybe do a _any across the warp/CTA.
if(encIdx[bs] < seqLen[bs]) {
// printf("not done\n");
done[iter] = 0;
}
}
void greedySearch(int32_t* symbolArr,
int32_t* num_symbols_current_step,
int32_t* encIdx,
int32_t* seqLen,
bool* isNotBlank,
int32_t* outSeq,
int32_t* outSeqLen,
int32_t* done,
int batchSize,
int iter,
int _BLANK_,
int hp_max_symbols_per_step,
int maxLength,
hipStream_t stream) {
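// One thread per batch element, launched in 128-thread blocks.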
dim3 blockDim = dim3(128, 1, 1);
dim3 gridDim = dim3((batchSize + blockDim.x - 1) / blockDim.x, 1, 1);
hipLaunchKernelGGL(( greedySearch_ker) , dim3(gridDim), dim3(blockDim), 0, stream , symbolArr,
num_symbols_current_step,
encIdx,
seqLen,
isNotBlank,
outSeq,
outSeqLen,
done,
batchSize,
iter,
_BLANK_,
hp_max_symbols_per_step,
maxLength);
}
// ====================
// Sparse Initializers
// ====================
// A family of methods to implement conditional/sparse memsets to state/cell tensors in the LSTMs of the models
// void InitializeEncoderSparse(bool *sparseMask, size_t actualBatchSize)
// {
// size_t encPreSize = FLAGS_hp_enc_pre_rnn_layers * FLAGS_hp_encoder_hidden_size * esize;
// size_t encPostSize = FLAGS_hp_enc_post_rnn_layers * FLAGS_hp_encoder_hidden_size * esize;
//
// int8_t* pEncoderPreHidden = (int8_t*) encoderPreHidden->data();
// int8_t* pEncoderPreCell = (int8_t*) encoderPreCell->data();
// int8_t* pEncoderPostHidden = (int8_t*) encoderPostHidden->data();
// int8_t* pEncoderPostCell = (int8_t*) encoderPostCell->data();
//
// // Apply initialization element by element (preserve content when sparseMask is true)
// for (size_t bs = 0 ; bs < actualBatchSize ; bs++)
// {
// if(sparseMask[bs] == false) {
// CHECK_EQ(hipMemset(pEncoderPreHidden, 0, encPreSize), hipSuccess);
// CHECK_EQ(hipMemset(pEncoderPreCell, 0, encPreSize), hipSuccess);
// CHECK_EQ(hipMemset(pEncoderPostHidden, 0, encPostSize), hipSuccess);
// CHECK_EQ(hipMemset(pEncoderPostCell, 0, encPostSize), hipSuccess);
// }
//
// // update pointers
// pEncoderPreHidden += encPreSize;
// pEncoderPreCell += encPreSize;
// pEncoderPostHidden += encPostSize;
// pEncoderPostCell += encPostSize;
// }
// }
// Given tensors of size batchSize*int32
// [0] ptr = devBuffer + bs * int32
// [1] Set to zero if sparseMask[i] == false (content is preserved when the mask is true)
// [2] Advance the buffer pointer by stride and go to step [1]
__global__ void rnnt_sparse_memset_ker(int32_t* buffer,
bool* sparseMask,
int iter,
int stride,
int numThreads)
{
int thr = blockIdx.x * blockDim.x + threadIdx.x;
if (thr >= numThreads) return;
int i;
int32_t index = thr;
for(i=0; i<iter; i++) {
if(sparseMask[i] == false) {
buffer[index] = 0;
}
index += stride;
}
}
void rnntSparseMemSet(uint8_t* devBuffer,
bool* sparseMask,
int sizeBytes,
int batchSize,
hipStream_t stream) {
int32_t *buffer32 = (int32_t*)devBuffer;
int iter = batchSize;
int numThreads = sizeBytes / sizeof(int32_t);
int32_t stride = sizeBytes / sizeof(int32_t);
dim3 blockDim = dim3(128, 1, 1);
dim3 gridDim = dim3((numThreads + blockDim.x - 1) / blockDim.x, 1, 1);
// printf("rnntSparseMemSet(size=%d,bs=%d): iter=%d numThreads=%d \n",sizeBytes, batchSize, iter, numThreads);
hipLaunchKernelGGL(( rnnt_sparse_memset_ker) , dim3(gridDim), dim3(blockDim), 0, stream , buffer32,
sparseMask,
iter,
stride,
numThreads);
}
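// Worked example for rnntSparseMemSet (illustrative numbers only): for a
// per-sample LSTM state of 320 fp16 values, sizeBytes = 320 * 2 = 640, so
// numThreads = stride = 640 / 4 = 160 int32 words. Thread t starts at word t
// of sample 0 and steps by `stride` across the batch, clearing word t of every
// sample whose sparseMask entry is false and leaving masked samples untouched.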
// ====================
// Joint FC1+top1
// ====================
// Implementing FC1_SUM + RELU + FC2 + TOP1 into a single cuda kernel
#if 1
template <int K, int NUM_WARPS, int ROWS_PER_WARP, int COLS_PER_BLOCK, bool save_intermediate>
__launch_bounds__(NUM_WARPS*32,2)
__global__ void fc2_top1_ker(half2* A, half2* B1, half2*B2, half* C, half* d_bias, int32_t* top1) {
const int K2 = K/2;
half2 accum[COLS_PER_BLOCK][ROWS_PER_WARP];
int warp_id = threadIdx.x / 32;
int tid = threadIdx.x % 32;
int warp_row = warp_id * ROWS_PER_WARP;
// Delay some warps in order to prevent overwhelming LSU
/*
uint64_t time = clock64();
while (clock64() - time < warp_id*1200);
*/
int sample_id = blockIdx.x*COLS_PER_BLOCK;
for (int r=0; r<ROWS_PER_WARP; r++) {
for (int ni=0; ni<COLS_PER_BLOCK; ni++) {
accum[ni][r].x = 0;
accum[ni][r].y = 0;
}
}
bool pred[ROWS_PER_WARP];
for (int r=0; r<ROWS_PER_WARP; r++) {
pred[r] = warp_row + r < 29;
}
half2 a[ROWS_PER_WARP][K2/32];
half2 b[COLS_PER_BLOCK][K2/32];
half bias[ROWS_PER_WARP];
#pragma unroll
for (int i=0; i<K2/32; i++) {
#pragma unroll
for (int mi=0; mi<ROWS_PER_WARP; mi++) {
int row = warp_row + mi;
if (pred[mi]) a[mi][i] = A[row*K2 + i*32 + tid];
}
#pragma unroll
for (int ni=0; ni<COLS_PER_BLOCK; ni++) {
// apply here element_wise: RELU(SUM(B1, B2))
// b[ni][i] = B[(sample_id+ni)*K2+i*32+tid];
b[ni][i] = B1[(sample_id+ni)*K2+i*32+tid] + B2[(sample_id+ni)*K2+i*32+tid];
// RELU: if (b[ni][i] < 0.0) b[ni][i] = 0.0;
b[ni][i] = b[ni][i] * __hgeu2(b[ni][i], __float2half2_rn(0.0) );
}
}
for (int mi=0; mi<ROWS_PER_WARP; mi++) {
int row = warp_row + mi;
if (pred[mi]) bias[mi] = d_bias[row];
}
#pragma unroll
for (int mi=0; mi<ROWS_PER_WARP; mi++) {
#pragma unroll
for (int ni=0; ni<COLS_PER_BLOCK; ni++) {
#pragma unroll
for (int i=0; i<K2/32; i++) {
accum[ni][mi] += a[mi][i] * b[ni][i];
}
}
}
__shared__ float result[COLS_PER_BLOCK][32];
#pragma unroll
for (int r=0; r<ROWS_PER_WARP; r++) {
#pragma unroll
for (int ni=0; ni<COLS_PER_BLOCK; ni++) {
// Warp reduce
for (int offset=16; offset>0; offset /= 2) {
accum[ni][r] += __shfl_down_sync(0xFFFFFFFF,accum[ni][r],offset);
}
half val = accum[ni][r].x + accum[ni][r].y;
val += bias[r];
// printf("[%f]",__half2float(val)); // DEBUG
if (save_intermediate && tid == 0 && (warp_row+r<29)) C[(sample_id+ni)*29+warp_row+r] = val;
if (tid == 0) result[ni][warp_id*ROWS_PER_WARP + r] = __half2float(val);
}
}
__syncthreads();
if (warp_id == 0) {
for (int ni=0; ni<COLS_PER_BLOCK; ni++) {
float val = result[ni][threadIdx.x];
int idx = (threadIdx.x<29) ? (int)threadIdx.x : -1;
for (int offset=16; offset>0; offset /= 2) {
int other_idx = __shfl_down_sync(0xFFFFFFFF,idx,offset);
float other_val = __shfl_down_sync(0xFFFFFFFF,val,offset);
if (idx == -1 || (other_idx != -1 && other_val > val)) {
idx = other_idx;
val = other_val;
}
}
if (threadIdx.x == 0) {
top1[sample_id+ni] = idx;
// printf("{%d,%d}:: [sample_id=%d][ni=%d]: %d\n",blockIdx.x, threadIdx.x, sample_id,ni,idx); // DEBUG
}
}
}
}
// void rnntFc2Top1(half2* devFc1EncoderBuffer, // activation #1 (from fc1 encoder)
// half2* devFc1DecoderBuffer, // activation #2 (from fc1 decoder)
// half2* devFc1WeightsBuffer, // FC2 weights
// half* devFc1BiasBuffer, // FC2 bias
// half* devFc2OutputBuffer, // transient FC2 output
// int32_t* devTop1Buffer, // Top1 output
// int batchSize,
// hipStream_t stream) {
void rnntFc2Top1(uint8_t* devFc1EncoderBuffer, // activation #1 (from fc1 encoder)
uint8_t* devFc1DecoderBuffer, // activation #2 (from fc1 decoder)
uint8_t* devFc1WeightsBuffer, // FC2 weights
uint8_t* devFc1BiasBuffer, // FC2 bias
int32_t* devFc2OutputBuffer, // transient FC2 output
int32_t* devTop1Buffer, // Top1 output
int batchSize,
hipStream_t stream) {
// static parameters
// const int K = 1024;
const int K = 512; // fc2 input size (i.e., fc1 output size)
const int M = 32;
const int NUM_WARPS = 4;
const int ROWS_PER_WARP = M/NUM_WARPS;
const int COLS_PER_BLOCK = 1;
const bool save_intermediate = false;
// process parameters
int num_blocks = batchSize / COLS_PER_BLOCK;
// cuda kernel
// printf("fc2_top1_kernel<K=%d, NUM_WARPS=%d, ROWS_PER_WARP=%d, COLS_PER_BLOCK=%d><<<%d,%d>>>",K, NUM_WARPS, ROWS_PER_WARP, COLS_PER_BLOCK, num_blocks, NUM_WARPS*32);
hipLaunchKernelGGL(( fc2_top1_ker<K, NUM_WARPS, ROWS_PER_WARP, COLS_PER_BLOCK, save_intermediate>) , dim3(num_blocks),dim3(NUM_WARPS*32), 0, stream,
(half2*) devFc1WeightsBuffer,
(half2*) devFc1EncoderBuffer,
(half2*) devFc1DecoderBuffer,
(half*) devFc2OutputBuffer,
(half*) devFc1BiasBuffer,
(int32_t*) devTop1Buffer);
}
#endif
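// Worked example of the launch geometry above: K = 512 gives K2 = 256 half2
// values per weight row, so each of the 32 lanes in a warp loads K2/32 = 8
// half2 of A and of the fused RELU(B1 + B2) activation. M = 32 rows over
// NUM_WARPS = 4 warps gives ROWS_PER_WARP = 8, of which only rows with
// warp_row + r < 29 are real outputs (29 appears to be the joint-network
// output size, i.e. alphabet plus blank). With COLS_PER_BLOCK = 1 each block
// handles one batch sample, so num_blocks = batchSize.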
// ====================
// Encoder gather
// ====================
// Implementing RnntGather for the FC1 encoder input in joint network
//
// ec.igather->step(actualBatchSize, mainStream,
// tc.encoderOut[pol],
// tc.encIdx,
// tc.encGather);
__global__ void rnnt_enc_gather_ker(int32_t* outBuffer, // [bs][seq_len][inner_stride]
int32_t* idxVec, // [bs]
int32_t* inBuffer, // [bs][inner_stride]
int32_t inner_iter,
int32_t inner_stride,
int32_t outer_iter,
int32_t outer_stride,
size_t max_seq_length,
int batchSize)
{
int inner_offset = blockDim.x;
int bs = blockIdx.x * outer_iter;
// outer loop ([bs])
for(int i = 0; i < outer_iter ; i++, bs++)
{
if(bs >= batchSize) continue;
// channel is based on thread Idx
int ch = threadIdx.x;
// get index for the input
int32_t idx = idxVec[bs];
// base pointers
int32_t *pIn = inBuffer + (bs * outer_stride) + (idx * inner_stride);
int32_t *pOut = outBuffer + (bs * inner_stride);
// inner loop ([ch])
for(int j = 0; j < inner_iter ; j++, ch+=inner_offset)
{
if(ch >= inner_stride) continue;
// write zero if the encoder index is out of bounds
if (idx >= max_seq_length) {
pOut[ch] = 0;
continue;
}
// transfer
pOut[ch] = pIn[ch];
}
}
}
void rnntIgatherStep(
uint8_t* devRnntEncoderBuffer, // input from encoder RNNT [bs][seq_len][chan]
int32_t* devEncIdxBuffer, // vector of indexes [bs], int32
uint8_t* devEncGatherBuffer, // output to be consumed by joint::fc1 [bs][chan]
size_t eSize, // element size
size_t encoderChannels, // channel size (hp_encoder_hidden_size)
size_t seqLength, // sequence length (hp_max_seq_length)
int batchSize,
hipStream_t stream) {
// tiling approach: (example)
// block dimension = 128
// inner iter = encoder channel size / 128 (e.g. 512*2/4 over 128 = 2)
// grid dimension = 128
// outer iter = (batchSize / grid_dimension)'
// static parameters
const int BLOCK_DIM = 128;
const int GRID_DIM = 2048;
// inner iterations
int blockSize = encoderChannels * eSize;
int blockElements = blockSize / sizeof(int32_t);
// assert(blockElements > BLOCK_DIM);
int inner_iter = blockElements / BLOCK_DIM;
// outer iterations
int grid_elements = (batchSize > GRID_DIM)? GRID_DIM : batchSize;
int outer_iter = (batchSize + grid_elements -1) / grid_elements;
// dim3 gridDim = dim3((numThreads + blockDim.x - 1) / blockDim.x, 1, 1);
dim3 blockDim = dim3(BLOCK_DIM, 1, 1);
dim3 gridDim = dim3(grid_elements, 1, 1);
// strides
int32_t inner_stride = blockElements;
int32_t outer_stride = seqLength * inner_stride;
// printf("rnntIgatherStep(bs=%d, esize=%d,encChan=%d,seqLen=%d)<%d,%d>: inner_iter=%d inner_stride=%d outer_iter=%d outer_stride=%d\n ", batchSize, eSize, encoderChannels, seqLength, BLOCK_DIM, grid_elements, inner_iter, inner_stride, outer_iter, outer_stride);
hipLaunchKernelGGL(( rnnt_enc_gather_ker) , dim3(gridDim), dim3(blockDim), 0, stream , (int32_t*) devEncGatherBuffer,
(int32_t*) devEncIdxBuffer,
(int32_t*) devRnntEncoderBuffer,
inner_iter,
inner_stride,
outer_iter,
outer_stride,
seqLength,
batchSize);
}
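// Worked example of the tiling above (illustrative numbers): with
// encoderChannels = 512 and eSize = 2 (fp16), blockSize = 1024 bytes, i.e.
// blockElements = 256 int32 words and inner_iter = 256 / 128 = 2. With
// batchSize = 512, grid_elements = 512 (below GRID_DIM) and outer_iter = 1,
// while outer_stride = seqLength * 256 words. Each block therefore copies the
// 256-word slice at time step encIdx[bs] of its sample into the gather buffer.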
| 8adddad46bd6aa5372e1fb9b349d7e25af00bb1c.cu | /*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include <cuda_fp16.h>
#include <stdint.h> // uint8_t
#include <cassert>
// ====================
// Greedy search
// ====================
// Implementing vanilla greedy search in the device
__global__ void greedySearch_ker(int32_t* symbolArr,
int32_t* num_symbols_current_step,
int32_t* encIdx,
int32_t* seqLen,
bool* isNotBlank,
int32_t* outSeq,
int32_t* outSeqLen,
int32_t* done,
int batchSize,
int iter,
int _BLANK_,
int hp_max_symbols_per_step,
int maxLength) {
int bs = blockIdx.x * blockDim.x + threadIdx.x;
if (bs >= batchSize) return;
/* for (size_t bs = 0; bs < actualBatchSize ; bs++)
{
// Get current winner symbol
int32_t winner_symbol = symbolArr[bs];
// Update state based on the outcome
if(winner_symbol != _BLANK_ && num_symbols_current_step[bs] < FLAGS_hp_max_symbols_per_step)
{
isNotBlank[bs] = true;
lastSymbol[bs] = winner_symbol;
// Note that here we do not update the time pointer because
// we want to generate more symbols from the predictor
// FLAGS_always_advance_time: update time pointers as a hack if we do not get real data
if(FLAGS_always_advance_time)
{
if(encIdx[bs] < seqLen[bs])
{
encIdx[bs]++;
num_symbols_current_step[bs] = 0;
}
}
// Update output
outSeq[bs].push(winner_symbol);
num_symbols_current_step[bs]++;
// std::cout << "[blank=false] BS : " << bs << " t=" << encIdx[bs] << " winner symbol:[" << winner_symbol << "]" << std::endl;
// outSeq[bs].print("output");
}
else // winner_symbol == _BLANK_
{
isNotBlank[bs] = false;
// Note that here we do not update the inputs to the predictor
// because we will do the prediction again (brute force) and we
// want the same outcome
// update time pointer
if(encIdx[bs] < seqLen[bs])
{
encIdx[bs]++;
num_symbols_current_step[bs] = 0;
}
}
if(encIdx[bs] < seqLen[bs]) done = false;
} */
// Get current winner symbol
int32_t winner_symbol = symbolArr[bs];
// Update state based on the outcome
// if(winner_symbol != _BLANK_ && num_symbols_current_step[bs] < hp_max_symbols_per_step)
if(winner_symbol != _BLANK_ && num_symbols_current_step[bs] < hp_max_symbols_per_step && (encIdx[bs] < seqLen[bs]))
{
isNotBlank[bs] = true;
// Note that here we do not update the time pointer because
// we want to generate more symbols from the predictor
// Update output
outSeq[bs * maxLength + outSeqLen[bs]] = winner_symbol; // TODO: Fix bad access pattern
outSeqLen[bs]++;
num_symbols_current_step[bs]++;
// std::cout << "[blank=false] BS : " << bs << " t=" << encIdx[bs] << " winner symbol:[" << winner_symbol << "]" << std::endl;
// outSeq[bs].print("output");
}
else // winner_symbol == _BLANK_
{
isNotBlank[bs] = false;
// Note that here we do not update the inputs to the predictor
// because we will do the prediction again (brute force) and we
// want the same outcome
// update time pointer
if(encIdx[bs] < seqLen[bs]) {
encIdx[bs]++;
num_symbols_current_step[bs] = 0;
}
}
// check if this batch instance is done (consumed the whole encoder input)
// TODO: Hammering *done from many threads in a block. Maybe do a _any across the warp/CTA.
if(encIdx[bs] < seqLen[bs]) {
// printf("not done\n");
done[iter] = 0;
}
}
void greedySearch(int32_t* symbolArr,
int32_t* num_symbols_current_step,
int32_t* encIdx,
int32_t* seqLen,
bool* isNotBlank,
int32_t* outSeq,
int32_t* outSeqLen,
int32_t* done,
int batchSize,
int iter,
int _BLANK_,
int hp_max_symbols_per_step,
int maxLength,
cudaStream_t stream) {
dim3 blockDim = dim3(128, 1, 1);
dim3 gridDim = dim3((batchSize + blockDim.x - 1) / blockDim.x, 1, 1);
greedySearch_ker <<< gridDim, blockDim, 0, stream >>> (symbolArr,
num_symbols_current_step,
encIdx,
seqLen,
isNotBlank,
outSeq,
outSeqLen,
done,
batchSize,
iter,
_BLANK_,
hp_max_symbols_per_step,
maxLength);
}
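// --------------------------------------------------------------------------
// Illustrative sketch (an assumption, not from the original harness): the
// kernel clears done[iter] whenever some sample still has encoder frames
// left, so a host loop could poll that flag to decide when to stop decoding.
// d_done is assumed to be pre-filled with non-zero values before the run.
#if 0
static bool exampleAllSequencesDone(const int32_t* d_done, int iter, cudaStream_t stream)
{
    int32_t h_done = 0;
    cudaMemcpyAsync(&h_done, d_done + iter, sizeof(int32_t),
                    cudaMemcpyDeviceToHost, stream);
    cudaStreamSynchronize(stream);
    return h_done != 0;
}
#endif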
// ====================
// Sparse Initializers
// ====================
// A family of methods to implement conditional/sparse memsets to state/cell tensors in the LSTMs of the models
// void InitializeEncoderSparse(bool *sparseMask, size_t actualBatchSize)
// {
// size_t encPreSize = FLAGS_hp_enc_pre_rnn_layers * FLAGS_hp_encoder_hidden_size * esize;
// size_t encPostSize = FLAGS_hp_enc_post_rnn_layers * FLAGS_hp_encoder_hidden_size * esize;
//
// int8_t* pEncoderPreHidden = (int8_t*) encoderPreHidden->data();
// int8_t* pEncoderPreCell = (int8_t*) encoderPreCell->data();
// int8_t* pEncoderPostHidden = (int8_t*) encoderPostHidden->data();
// int8_t* pEncoderPostCell = (int8_t*) encoderPostCell->data();
//
// // Apply initialization element by element (preserve content when sparseMask is true)
// for (size_t bs = 0 ; bs < actualBatchSize ; bs++)
// {
// if(sparseMask[bs] == false) {
// CHECK_EQ(cudaMemset(pEncoderPreHidden, 0, encPreSize), cudaSuccess);
// CHECK_EQ(cudaMemset(pEncoderPreCell, 0, encPreSize), cudaSuccess);
// CHECK_EQ(cudaMemset(pEncoderPostHidden, 0, encPostSize), cudaSuccess);
// CHECK_EQ(cudaMemset(pEncoderPostCell, 0, encPostSize), cudaSuccess);
// }
//
// // update pointers
// pEncoderPreHidden += encPreSize;
// pEncoderPreCell += encPreSize;
// pEncoderPostHidden += encPostSize;
// pEncoderPostCell += encPostSize;
// }
// }
// Given tensors of size batchSize*int32
// [0] ptr = devBuffer + bs * int32
// [1] Set to zero if sparseMask[i] == false (content is preserved when the mask is true)
// [2] Advance the buffer pointer by stride and go to step [1]
__global__ void rnnt_sparse_memset_ker(int32_t* buffer,
bool* sparseMask,
int iter,
int stride,
int numThreads)
{
int thr = blockIdx.x * blockDim.x + threadIdx.x;
if (thr >= numThreads) return;
int i;
int32_t index = thr;
for(i=0; i<iter; i++) {
if(sparseMask[i] == false) {
buffer[index] = 0;
}
index += stride;
}
}
void rnntSparseMemSet(uint8_t* devBuffer,
bool* sparseMask,
int sizeBytes,
int batchSize,
cudaStream_t stream) {
int32_t *buffer32 = (int32_t*)devBuffer;
int iter = batchSize;
int numThreads = sizeBytes / sizeof(int32_t);
int32_t stride = sizeBytes / sizeof(int32_t);
dim3 blockDim = dim3(128, 1, 1);
dim3 gridDim = dim3((numThreads + blockDim.x - 1) / blockDim.x, 1, 1);
// printf("rnntSparseMemSet(size=%d,bs=%d): iter=%d numThreads=%d \n",sizeBytes, batchSize, iter, numThreads);
rnnt_sparse_memset_ker <<< gridDim, blockDim, 0, stream >>> (buffer32,
sparseMask,
iter,
stride,
numThreads);
}
// ====================
// Joint FC1+top1
// ====================
// Implementing FC1_SUM + RELU + FC2 + TOP1 into a single cuda kernel
#if 1
template <int K, int NUM_WARPS, int ROWS_PER_WARP, int COLS_PER_BLOCK, bool save_intermediate>
__launch_bounds__(NUM_WARPS*32,2)
__global__ void fc2_top1_ker(half2* A, half2* B1, half2*B2, half* C, half* d_bias, int32_t* top1) {
const int K2 = K/2;
half2 accum[COLS_PER_BLOCK][ROWS_PER_WARP];
int warp_id = threadIdx.x / 32;
int tid = threadIdx.x % 32;
int warp_row = warp_id * ROWS_PER_WARP;
// Delay some warps in order to prevent overwhelming LSU
/*
uint64_t time = clock64();
while (clock64() - time < warp_id*1200);
*/
int sample_id = blockIdx.x*COLS_PER_BLOCK;
for (int r=0; r<ROWS_PER_WARP; r++) {
for (int ni=0; ni<COLS_PER_BLOCK; ni++) {
accum[ni][r].x = 0;
accum[ni][r].y = 0;
}
}
bool pred[ROWS_PER_WARP];
for (int r=0; r<ROWS_PER_WARP; r++) {
pred[r] = warp_row + r < 29;
}
half2 a[ROWS_PER_WARP][K2/32];
half2 b[COLS_PER_BLOCK][K2/32];
half bias[ROWS_PER_WARP];
#pragma unroll
for (int i=0; i<K2/32; i++) {
#pragma unroll
for (int mi=0; mi<ROWS_PER_WARP; mi++) {
int row = warp_row + mi;
if (pred[mi]) a[mi][i] = A[row*K2 + i*32 + tid];
}
#pragma unroll
for (int ni=0; ni<COLS_PER_BLOCK; ni++) {
// apply here element_wise: RELU(SUM(B1, B2))
// b[ni][i] = B[(sample_id+ni)*K2+i*32+tid];
b[ni][i] = B1[(sample_id+ni)*K2+i*32+tid] + B2[(sample_id+ni)*K2+i*32+tid];
// RELU: if (b[ni][i] < 0.0) b[ni][i] = 0.0;
b[ni][i] = b[ni][i] * __hgeu2(b[ni][i], __float2half2_rn(0.0) );
}
}
for (int mi=0; mi<ROWS_PER_WARP; mi++) {
int row = warp_row + mi;
if (pred[mi]) bias[mi] = d_bias[row];
}
#pragma unroll
for (int mi=0; mi<ROWS_PER_WARP; mi++) {
#pragma unroll
for (int ni=0; ni<COLS_PER_BLOCK; ni++) {
#pragma unroll
for (int i=0; i<K2/32; i++) {
accum[ni][mi] += a[mi][i] * b[ni][i];
}
}
}
__shared__ float result[COLS_PER_BLOCK][32];
#pragma unroll
for (int r=0; r<ROWS_PER_WARP; r++) {
#pragma unroll
for (int ni=0; ni<COLS_PER_BLOCK; ni++) {
// Warp reduce
for (int offset=16; offset>0; offset /= 2) {
accum[ni][r] += __shfl_down_sync(0xFFFFFFFF,accum[ni][r],offset);
}
half val = accum[ni][r].x + accum[ni][r].y;
val += bias[r];
// printf("[%f]",__half2float(val)); // DEBUG
if (save_intermediate && tid == 0 && (warp_row+r<29)) C[(sample_id+ni)*29+warp_row+r] = val;
if (tid == 0) result[ni][warp_id*ROWS_PER_WARP + r] = __half2float(val);
}
}
__syncthreads();
if (warp_id == 0) {
for (int ni=0; ni<COLS_PER_BLOCK; ni++) {
float val = result[ni][threadIdx.x];
int idx = (threadIdx.x<29) ? (int)threadIdx.x : -1;
for (int offset=16; offset>0; offset /= 2) {
int other_idx = __shfl_down_sync(0xFFFFFFFF,idx,offset);
float other_val = __shfl_down_sync(0xFFFFFFFF,val,offset);
if (idx == -1 || (other_idx != -1 && other_val > val)) {
idx = other_idx;
val = other_val;
}
}
if (threadIdx.x == 0) {
top1[sample_id+ni] = idx;
// printf("{%d,%d}:: [sample_id=%d][ni=%d]: %d\n",blockIdx.x, threadIdx.x, sample_id,ni,idx); // DEBUG
}
}
}
}
// void rnntFc2Top1(half2* devFc1EncoderBuffer, // activation #1 (from fc1 encoder)
// half2* devFc1DecoderBuffer, // activation #2 (from fc1 decoder)
// half2* devFc1WeightsBuffer, // FC2 weights
// half* devFc1BiasBuffer, // FC2 bias
// half* devFc2OutputBuffer, // transient FC2 output
// int32_t* devTop1Buffer, // Top1 output
// int batchSize,
// cudaStream_t stream) {
void rnntFc2Top1(uint8_t* devFc1EncoderBuffer, // activation #1 (from fc1 encoder)
uint8_t* devFc1DecoderBuffer, // activation #2 (from fc1 decoder)
uint8_t* devFc1WeightsBuffer, // FC2 weights
uint8_t* devFc1BiasBuffer, // FC2 bias
int32_t* devFc2OutputBuffer, // transient FC2 output
int32_t* devTop1Buffer, // Top1 output
int batchSize,
cudaStream_t stream) {
// static parameters
// const int K = 1024;
const int K = 512; // fc2 input size (i.e., fc1 output size)
const int M = 32;
const int NUM_WARPS = 4;
const int ROWS_PER_WARP = M/NUM_WARPS;
const int COLS_PER_BLOCK = 1;
const bool save_intermediate = false;
// process parameters
int num_blocks = batchSize / COLS_PER_BLOCK;
// cuda kernel
// printf("fc2_top1_kernel<K=%d, NUM_WARPS=%d, ROWS_PER_WARP=%d, COLS_PER_BLOCK=%d><<<%d,%d>>>",K, NUM_WARPS, ROWS_PER_WARP, COLS_PER_BLOCK, num_blocks, NUM_WARPS*32);
fc2_top1_ker<K, NUM_WARPS, ROWS_PER_WARP, COLS_PER_BLOCK, save_intermediate> <<<num_blocks,NUM_WARPS*32, 0, stream>>> (
(half2*) devFc1WeightsBuffer,
(half2*) devFc1EncoderBuffer,
(half2*) devFc1DecoderBuffer,
(half*) devFc2OutputBuffer,
(half*) devFc1BiasBuffer,
(int32_t*) devTop1Buffer);
}
#endif
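// The reduction above gives every lane at index >= 29 the sentinel idx = -1,
// and at each shuffle step a lane keeps whichever (idx, val) pair has the
// larger value while preferring any valid index over the sentinel, so after
// five steps lane 0 holds the argmax over the 29 logits. A sequential
// reference of that argmax (a sketch; referenceTop1 is a made-up name):
#if 0
static int referenceTop1(const float* logits /* 29 values for one sample */)
{
    int best = 0;
    for (int i = 1; i < 29; ++i)
        if (logits[i] > logits[best]) best = i;
    return best;
}
#endif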
// ====================
// Encoder gather
// ====================
// Implementing RnntGather for the FC1 encoder input in joint network
//
// ec.igather->step(actualBatchSize, mainStream,
// tc.encoderOut[pol],
// tc.encIdx,
// tc.encGather);
__global__ void rnnt_enc_gather_ker(int32_t* outBuffer, // [bs][seq_len][inner_stride]
int32_t* idxVec, // [bs]
int32_t* inBuffer, // [bs][inner_stride]
int32_t inner_iter,
int32_t inner_stride,
int32_t outer_iter,
int32_t outer_stride,
size_t max_seq_length,
int batchSize)
{
int inner_offset = blockDim.x;
int bs = blockIdx.x * outer_iter;
// outer loop ([bs])
for(int i = 0; i < outer_iter ; i++, bs++)
{
if(bs >= batchSize) continue;
// channel is based on thread Idx
int ch = threadIdx.x;
// get index for the input
int32_t idx = idxVec[bs];
// base pointers
int32_t *pIn = inBuffer + (bs * outer_stride) + (idx * inner_stride);
int32_t *pOut = outBuffer + (bs * inner_stride);
// inner loop ([ch])
for(int j = 0; j < inner_iter ; j++, ch+=inner_offset)
{
if(ch >= inner_stride) continue;
// write zero if the encoder index is out of bounds
if (idx >= max_seq_length) {
pOut[ch] = 0;
continue;
}
// transfer
pOut[ch] = pIn[ch];
}
}
}
void rnntIgatherStep(
uint8_t* devRnntEncoderBuffer, // input from encoder RNNT [bs][seq_len][chan]
int32_t* devEncIdxBuffer, // vector of indexes [bs], int32
uint8_t* devEncGatherBuffer, // output to be consumed by joint::fc1 [bs][chan]
size_t eSize, // element size
size_t encoderChannels, // channel size (hp_encoder_hidden_size)
size_t seqLength, // sequence length (hp_max_seq_length)
int batchSize,
cudaStream_t stream) {
// tiling approach: (example)
// block dimension = 128
// inner iter = encoder channel size / 128 (e.g. 512*2/4 over 128 = 2)
// grid dimension = 128
// outer iter = (batchSize / grid_dimension)'
// static parameters
const int BLOCK_DIM = 128;
const int GRID_DIM = 2048;
// inner iterations
int blockSize = encoderChannels * eSize;
int blockElements = blockSize / sizeof(int32_t);
// assert(blockElements > BLOCK_DIM);
int inner_iter = blockElements / BLOCK_DIM;
// outer iterations
int grid_elements = (batchSize > GRID_DIM)? GRID_DIM : batchSize;
int outer_iter = (batchSize + grid_elements -1) / grid_elements;
// dim3 gridDim = dim3((numThreads + blockDim.x - 1) / blockDim.x, 1, 1);
dim3 blockDim = dim3(BLOCK_DIM, 1, 1);
dim3 gridDim = dim3(grid_elements, 1, 1);
// strides
int32_t inner_stride = blockElements;
int32_t outer_stride = seqLength * inner_stride;
// printf("rnntIgatherStep(bs=%d, esize=%d,encChan=%d,seqLen=%d)<%d,%d>: inner_iter=%d inner_stride=%d outer_iter=%d outer_stride=%d\n ", batchSize, eSize, encoderChannels, seqLength, BLOCK_DIM, grid_elements, inner_iter, inner_stride, outer_iter, outer_stride);
rnnt_enc_gather_ker <<< gridDim, blockDim, 0, stream >>> ((int32_t*) devEncGatherBuffer,
(int32_t*) devEncIdxBuffer,
(int32_t*) devRnntEncoderBuffer,
inner_iter,
inner_stride,
outer_iter,
outer_stride,
seqLength,
batchSize);
}
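// In effect, for sample bs and 4-byte word ch the kernel above computes
// encGather[bs][ch] = (encIdx[bs] < seqLength) ? encoderOut[bs][encIdx[bs]][ch] : 0,
// i.e. it picks out the encoder output of the current time step for each sample.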
|
5bdef8a13c406cc002d5ea34d8416f54f5845eee.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/* Utility / shared functionality for bisection kernels */
#ifndef _BISECT_UTIL_H_
#define _BISECT_UTIL_H_
// includes, project
#include "config.h"
#include "util.h"
////////////////////////////////////////////////////////////////////////////////
//! Compute the next lower power of two of n
//! @param n number for which the next lower power of two is sought
////////////////////////////////////////////////////////////////////////////////
__device__
inline int
floorPow2(int n)
{
// early out if already power of two
if (0 == (n & (n-1)))
{
return n;
}
int exp;
frexp((float)n, &exp);
return (1 << (exp - 1));
}
////////////////////////////////////////////////////////////////////////////////
//! Compute the next higher power of two of n
//! @param n number for which the next higher power of two is sought
////////////////////////////////////////////////////////////////////////////////
__device__
inline int
ceilPow2(int n)
{
// early out if already power of two
if (0 == (n & (n-1)))
{
return n;
}
int exp;
frexp((float)n, &exp);
return (1 << exp);
}
////////////////////////////////////////////////////////////////////////////////
//! Compute midpoint of interval [\a left, \a right] avoiding overflow if
//! possible
//! @param left left / lower limit of interval
//! @param right right / upper limit of interval
////////////////////////////////////////////////////////////////////////////////
__device__
inline float
computeMidpoint(const float left, const float right)
{
float mid;
if (sign_f(left) == sign_f(right))
{
mid = left + (right - left) * 0.5f;
}
else
{
mid = (left + right) * 0.5f;
}
return mid;
}
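// Numeric example of why the branch matters (illustrative values): with
// left = 3.0e38f and right = 3.3e38f, (left + right) * 0.5f overflows to +inf
// in single precision, while left + (right - left) * 0.5f = 3.15e38f stays
// finite. When the signs differ, |left + right| <= max(|left|, |right|), so
// the plain average cannot overflow and the cheaper form is used.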
////////////////////////////////////////////////////////////////////////////////
//! Check if interval converged and store appropriately
//! @param addr address where to store the information of the interval
//! @param s_left shared memory storage for left interval limits
//! @param s_right shared memory storage for right interval limits
//! @param s_left_count shared memory storage for number of eigenvalues less
//! than left interval limits
//! @param s_right_count shared memory storage for number of eigenvalues less
//! than right interval limits
//! @param left lower limit of interval
//! @param right upper limit of interval
//! @param left_count eigenvalues less than \a left
//! @param right_count eigenvalues less than \a right
//! @param precision desired precision for eigenvalues
////////////////////////////////////////////////////////////////////////////////
template<class S, class T>
__device__
void
storeInterval(unsigned int addr,
float *s_left, float *s_right,
T *s_left_count, T *s_right_count,
float left, float right,
S left_count, S right_count,
float precision)
{
s_left_count[addr] = left_count;
s_right_count[addr] = right_count;
// check if interval converged
float t0 = abs(right - left);
float t1 = max(abs(left), abs(right)) * precision;
if (t0 <= max(MIN_ABS_INTERVAL, t1))
{
// compute mid point
float lambda = computeMidpoint(left, right);
// mark as converged
s_left[addr] = lambda;
s_right[addr] = lambda;
}
else
{
// store current limits
s_left[addr] = left;
s_right[addr] = right;
}
}
////////////////////////////////////////////////////////////////////////////////
//! Compute number of eigenvalues that are smaller than x given a symmetric,
//! real, and tridiagonal matrix
//! @param g_d diagonal elements stored in global memory
//! @param g_s superdiagonal elements stored in global memory
//! @param n size of matrix
//! @param x value for which the number of eigenvalues that are smaller is
//! sought
//! @param tid thread identified (e.g. threadIdx.x or gtid)
//! @param num_intervals_active number of active intervals / threads that
//! currently process an interval
//! @param s_d scratch space to store diagonal entries of the tridiagonal
//! matrix in shared memory
//! @param s_s scratch space to store superdiagonal entries of the tridiagonal
//! matrix in shared memory
//! @param converged flag if the current thread is already converged (that
//! is count does not have to be computed)
////////////////////////////////////////////////////////////////////////////////
__device__
inline unsigned int
computeNumSmallerEigenvals(float *g_d, float *g_s, const unsigned int n,
const float x,
const unsigned int tid,
const unsigned int num_intervals_active,
float *s_d, float *s_s,
unsigned int converged
)
{
float delta = 1.0f;
unsigned int count = 0;
__syncthreads();
// read data into shared memory
if (threadIdx.x < n)
{
s_d[threadIdx.x] = *(g_d + threadIdx.x);
s_s[threadIdx.x] = *(g_s + threadIdx.x - 1);
}
__syncthreads();
// perform loop only for active threads
if ((tid < num_intervals_active) && (0 == converged))
{
// perform (optimized) Gaussian elimination to determine the number
// of eigenvalues that are smaller than x
for (unsigned int k = 0; k < n; ++k)
{
delta = s_d[k] - x - (s_s[k] * s_s[k]) / delta;
count += (delta < 0) ? 1 : 0;
}
} // end if thread currently processing an interval
return count;
}
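// Host-side reference for the count above (a sketch, not part of the original
// sample): the loop is a Sturm-sequence style LDL^T recurrence, and the number
// of negative pivots `delta` equals the number of eigenvalues of the
// tridiagonal matrix that lie below the shift x. The k == 0 case treats the
// off-diagonal as zero, mirroring the g_s - 1 indexing used in the kernel.
#if 0
static unsigned int referenceNumSmallerEigenvals(const float* d, const float* s,
                                                 unsigned int n, float x)
{
    float delta = 1.0f;
    unsigned int count = 0;
    for (unsigned int k = 0; k < n; ++k)
    {
        float off = (k == 0) ? 0.0f : s[k];
        delta = d[k] - x - (off * off) / delta;
        count += (delta < 0.0f) ? 1 : 0;
    }
    return count;
}
#endif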
////////////////////////////////////////////////////////////////////////////////
//! Compute number of eigenvalues that are smaller than x given a symmetric,
//! real, and tridiagonal matrix
//! @param g_d diagonal elements stored in global memory
//! @param g_s superdiagonal elements stored in global memory
//! @param n size of matrix
//! @param x value for which the number of eigenvalues that are smaller is
//! sought
//! @param tid thread identified (e.g. threadIdx.x or gtid)
//! @param num_intervals_active number of active intervals / threads that
//! currently process an interval
//! @param s_d scratch space to store diagonal entries of the tridiagonal
//! matrix in shared memory
//! @param s_s scratch space to store superdiagonal entries of the tridiagonal
//! matrix in shared memory
//! @param converged flag if the current thread is already converged (that
//! is count does not have to be computed)
////////////////////////////////////////////////////////////////////////////////
__device__
inline unsigned int
computeNumSmallerEigenvalsLarge(float *g_d, float *g_s, const unsigned int n,
const float x,
const unsigned int tid,
const unsigned int num_intervals_active,
float *s_d, float *s_s,
unsigned int converged
)
{
float delta = 1.0f;
unsigned int count = 0;
unsigned int rem = n;
// do until whole diagonal and superdiagonal has been loaded and processed
for (unsigned int i = 0; i < n; i += blockDim.x)
{
__syncthreads();
// read new chunk of data into shared memory
if ((i + threadIdx.x) < n)
{
s_d[threadIdx.x] = *(g_d + i + threadIdx.x);
s_s[threadIdx.x] = *(g_s + i + threadIdx.x - 1);
}
__syncthreads();
if (tid < num_intervals_active)
{
// perform (optimized) Gaussian elimination to determine the number
// of eigenvalues that are smaller than x
for (unsigned int k = 0; k < min(rem,blockDim.x); ++k)
{
delta = s_d[k] - x - (s_s[k] * s_s[k]) / delta;
// delta = (abs( delta) < (1.0e-10)) ? -(1.0e-10) : delta;
count += (delta < 0) ? 1 : 0;
}
} // end if thread currently processing an interval
rem -= blockDim.x;
}
return count;
}
////////////////////////////////////////////////////////////////////////////////
//! Store all non-empty intervals resulting from the subdivision of the interval
//! currently processed by the thread
//! @param addr base address for storing intervals
//! @param num_threads_active number of threads / intervals in current sweep
//! @param s_left shared memory storage for left interval limits
//! @param s_right shared memory storage for right interval limits
//! @param s_left_count shared memory storage for number of eigenvalues less
//! than left interval limits
//! @param s_right_count shared memory storage for number of eigenvalues less
//! than right interval limits
//! @param left lower limit of interval
//! @param mid midpoint of interval
//! @param right upper limit of interval
//! @param left_count eigenvalues less than \a left
//! @param mid_count eigenvalues less than \a mid
//! @param right_count eigenvalues less than \a right
//! @param precision desired precision for eigenvalues
//! @param compact_second_chunk shared mem flag if second chunk is used and
//! ergo requires compaction
//! @param s_compaction_list_exc helper array for stream compaction,
//! s_compaction_list_exc[tid] = 1 when the
//! thread generated two child intervals
//! @param is_active_second marks whether the thread has a second non-empty child interval
////////////////////////////////////////////////////////////////////////////////
template<class S, class T>
__device__
void
storeNonEmptyIntervals(unsigned int addr,
const unsigned int num_threads_active,
float *s_left, float *s_right,
T *s_left_count, T *s_right_count,
float left, float mid, float right,
const S left_count,
const S mid_count,
const S right_count,
float precision,
unsigned int &compact_second_chunk,
T *s_compaction_list_exc,
unsigned int &is_active_second)
{
// check if both child intervals are valid
if ((left_count != mid_count) && (mid_count != right_count))
{
// store the left interval
storeInterval(addr, s_left, s_right, s_left_count, s_right_count,
left, mid, left_count, mid_count, precision);
// mark that a second interval has been generated, only stored after
// stream compaction of second chunk
is_active_second = 1;
s_compaction_list_exc[threadIdx.x] = 1;
compact_second_chunk = 1;
}
else
{
// only one non-empty child interval
// mark that no second child
is_active_second = 0;
s_compaction_list_exc[threadIdx.x] = 0;
// store the one valid child interval
if (left_count != mid_count)
{
storeInterval(addr, s_left, s_right, s_left_count, s_right_count,
left, mid, left_count, mid_count, precision);
}
else
{
storeInterval(addr, s_left, s_right, s_left_count, s_right_count,
mid, right, mid_count, right_count, precision);
}
}
}
////////////////////////////////////////////////////////////////////////////////
//! Create indices for compaction, that is process \a s_compaction_list_exc
//! which is 1 for intervals that generated a second child and 0 otherwise
//! and create for each of the non-zero elements the index where the new
//! interval belongs to in a compact representation of all generated second
//! children
//! @param s_compaction_list_exc list containing the flags which threads
//! generated two children
//! @param num_threads_compaction number of threads to employ for compaction
////////////////////////////////////////////////////////////////////////////////
template<class T>
__device__
void
createIndicesCompaction(T *s_compaction_list_exc,
unsigned int num_threads_compaction)
{
unsigned int offset = 1;
const unsigned int tid = threadIdx.x;
// higher levels of scan tree
for (int d = (num_threads_compaction >> 1); d > 0; d >>= 1)
{
__syncthreads();
if (tid < d)
{
unsigned int ai = offset*(2*tid+1)-1;
unsigned int bi = offset*(2*tid+2)-1;
s_compaction_list_exc[bi] = s_compaction_list_exc[bi]
+ s_compaction_list_exc[ai];
}
offset <<= 1;
}
// traverse down tree: first down to level 2 across
for (int d = 2; d < num_threads_compaction; d <<= 1)
{
offset >>= 1;
__syncthreads();
if (tid < (d-1))
{
unsigned int ai = offset*(tid+1) - 1;
unsigned int bi = ai + (offset >> 1);
s_compaction_list_exc[bi] = s_compaction_list_exc[bi]
+ s_compaction_list_exc[ai];
}
}
__syncthreads();
}
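// Conceptually (a sketch, not part of the original sample), the up-sweep and
// down-sweep passes above behave like an in-place prefix sum over the 0/1
// flags: afterwards each thread that produced a second child interval can
// read a unique, densely packed offset that compactIntervals() uses as its
// write slot. A sequential equivalent of that prefix sum:
#if 0
static void referencePrefixSum(const unsigned int* flags, unsigned int* prefix, int n)
{
    unsigned int running = 0;
    for (int i = 0; i < n; ++i)
    {
        running += flags[i];
        prefix[i] = running; // inclusive prefix sum of the flags
    }
}
#endif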
///////////////////////////////////////////////////////////////////////////////
//! Perform stream compaction for second child intervals
//! @param s_left shared
//! @param s_left shared memory storage for left interval limits
//! @param s_right shared memory storage for right interval limits
//! @param s_left_count shared memory storage for number of eigenvalues less
//! than left interval limits
//! @param s_right_count shared memory storage for number of eigenvalues less
//! than right interval limits
//! @param mid midpoint of current interval (left of new interval)
//! @param right upper limit of interval
//! @param mid_count eigenvalues less than \a mid
//! @param s_compaction_list list containing the indices where the data has
//! to be stored
//! @param num_threads_active number of active threads / intervals
//! @param is_active_second marks whether the thread has a second non-empty child interval
///////////////////////////////////////////////////////////////////////////////
template<class T>
__device__
void
compactIntervals(float *s_left, float *s_right,
T *s_left_count, T *s_right_count,
float mid, float right,
unsigned int mid_count, unsigned int right_count,
T *s_compaction_list,
unsigned int num_threads_active,
unsigned int is_active_second)
{
const unsigned int tid = threadIdx.x;
// perform compaction / copy data for all threads where the second
// child is not dead
if ((tid < num_threads_active) && (1 == is_active_second))
{
unsigned int addr_w = num_threads_active + s_compaction_list[tid];
s_left[addr_w] = mid;
s_right[addr_w] = right;
s_left_count[addr_w] = mid_count;
s_right_count[addr_w] = right_count;
}
}
///////////////////////////////////////////////////////////////////////////////
//! Store intervals that have already converged (w.r.t. the desired precision),
//! duplicating intervals that contain multiple eigenvalues
//! @param s_left shared memory storage for left interval limits
//! @param s_right shared memory storage for right interval limits
//! @param s_left_count shared memory storage for number of eigenvalues less
//! than left interval limits
//! @param s_right_count shared memory storage for number of eigenvalues less
//! than right interval limits
//! @param left lower limit of interval
//! @param mid midpoint of interval (updated if split is necessary)
//! @param right upper limit of interval
//! @param left_count eigenvalues less than \a left
//! @param mid_count eigenvalues less than \a mid
//! @param right_count eigenvalues less than \a right
//! @param s_compaction_list_exc helper array for stream compaction, updated
//! at tid if split is necessary
//! @param compact_second_chunk shared mem flag if second chunk is used and
//! ergo requires compaction
//! @param num_threads_active number of active threads / intervals
///////////////////////////////////////////////////////////////////////////////
template<class T, class S>
__device__
void
storeIntervalConverged(float *s_left, float *s_right,
T *s_left_count, T *s_right_count,
float &left, float &mid, float &right,
S &left_count, S &mid_count, S &right_count,
T *s_compaction_list_exc,
unsigned int &compact_second_chunk,
const unsigned int num_threads_active)
{
const unsigned int tid = threadIdx.x;
const unsigned int multiplicity = right_count - left_count;
// check multiplicity of eigenvalue
if (1 == multiplicity)
{
// just re-store intervals, simple eigenvalue
s_left[tid] = left;
s_right[tid] = right;
s_left_count[tid] = left_count;
s_right_count[tid] = right_count;
// mark that no second child / clear
s_right_count[tid + num_threads_active] = 0;
s_compaction_list_exc[tid] = 0;
}
else
{
// number of eigenvalues after the split less than mid
mid_count = left_count + (multiplicity >> 1);
// store left interval
s_left[tid] = left;
s_right[tid] = right;
s_left_count[tid] = left_count;
s_right_count[tid] = mid_count;
mid = left;
// mark that second child interval exists
s_right_count[tid + num_threads_active] = right_count;
s_compaction_list_exc[tid] = 1;
compact_second_chunk = 1;
}
}
template<class T, class S>
__device__
void
storeIntervalConverged(float *s_left, float *s_right,
T *s_left_count, T *s_right_count,
float &left, float &mid, float &right,
S &left_count, S &mid_count, S &right_count,
T *s_compaction_list_exc,
unsigned int &compact_second_chunk,
const unsigned int num_threads_active,
unsigned int &is_active_second)
{
const unsigned int tid = threadIdx.x;
const unsigned int multiplicity = right_count - left_count;
// check multiplicity of eigenvalue
if (1 == multiplicity)
{
// just re-store intervals, simple eigenvalue
s_left[tid] = left;
s_right[tid] = right;
s_left_count[tid] = left_count;
s_right_count[tid] = right_count;
// mark that no second child / clear
is_active_second = 0;
s_compaction_list_exc[tid] = 0;
}
else
{
// number of eigenvalues after the split less than mid
mid_count = left_count + (multiplicity >> 1);
// store left interval
s_left[tid] = left;
s_right[tid] = right;
s_left_count[tid] = left_count;
s_right_count[tid] = mid_count;
mid = left;
// mark that second child interval exists
is_active_second = 1;
s_compaction_list_exc[tid] = 1;
compact_second_chunk = 1;
}
}
///////////////////////////////////////////////////////////////////////////////
//! Subdivide interval if active and not already converged
//! @param tid id of thread
//! @param s_left shared memory storage for left interval limits
//! @param s_right shared memory storage for right interval limits
//! @param s_left_count shared memory storage for number of eigenvalues less
//! than left interval limits
//! @param s_right_count shared memory storage for number of eigenvalues less
//! than right interval limits
//! @param num_threads_active number of active threads in warp
//! @param left lower limit of interval
//! @param right upper limit of interval
//! @param left_count eigenvalues less than \a left
//! @param right_count eigenvalues less than \a right
//! @param all_threads_converged shared memory flag if all threads are
//! converged
///////////////////////////////////////////////////////////////////////////////
template<class T>
__device__
void
subdivideActiveInterval(const unsigned int tid,
float *s_left, float *s_right,
T *s_left_count, T *s_right_count,
const unsigned int num_threads_active,
float &left, float &right,
unsigned int &left_count, unsigned int &right_count,
float &mid, unsigned int &all_threads_converged)
{
// for all active threads
if (tid < num_threads_active)
{
left = s_left[tid];
right = s_right[tid];
left_count = s_left_count[tid];
right_count = s_right_count[tid];
// check if thread already converged
if (left != right)
{
mid = computeMidpoint(left, right);
all_threads_converged = 0;
}
else if ((right_count - left_count) > 1)
{
// mark as not converged if multiple eigenvalues enclosed
// duplicate interval in storeIntervalsConverged()
all_threads_converged = 0;
}
} // end for all active threads
}
#endif // #ifndef _BISECT_UTIL_H_
| 5bdef8a13c406cc002d5ea34d8416f54f5845eee.cu | /*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/* Utility / shared functionality for bisection kernels */
#ifndef _BISECT_UTIL_H_
#define _BISECT_UTIL_H_
// includes, project
#include "config.h"
#include "util.h"
////////////////////////////////////////////////////////////////////////////////
//! Compute the next lower power of two of n
//! @param n number for which the next lower power of two is sought
////////////////////////////////////////////////////////////////////////////////
__device__
inline int
floorPow2(int n)
{
// early out if already power of two
if (0 == (n & (n-1)))
{
return n;
}
int exp;
frexp((float)n, &exp);
return (1 << (exp - 1));
}
////////////////////////////////////////////////////////////////////////////////
//! Compute the next higher power of two of n
//! @param n number for which the next higher power of two is sought
////////////////////////////////////////////////////////////////////////////////
__device__
inline int
ceilPow2(int n)
{
// early out if already power of two
if (0 == (n & (n-1)))
{
return n;
}
int exp;
frexp((float)n, &exp);
return (1 << exp);
}
////////////////////////////////////////////////////////////////////////////////
//! Compute midpoint of interval [\a left, \a right] avoiding overflow if
//! possible
//! @param left left / lower limit of interval
//! @param right right / upper limit of interval
////////////////////////////////////////////////////////////////////////////////
__device__
inline float
computeMidpoint(const float left, const float right)
{
float mid;
if (sign_f(left) == sign_f(right))
{
mid = left + (right - left) * 0.5f;
}
else
{
mid = (left + right) * 0.5f;
}
return mid;
}
////////////////////////////////////////////////////////////////////////////////
//! Check if interval converged and store appropriately
//! @param addr address where to store the information of the interval
//! @param s_left shared memory storage for left interval limits
//! @param s_right shared memory storage for right interval limits
//! @param s_left_count shared memory storage for number of eigenvalues less
//! than left interval limits
//! @param s_right_count shared memory storage for number of eigenvalues less
//! than right interval limits
//! @param left lower limit of interval
//! @param right upper limit of interval
//! @param left_count eigenvalues less than \a left
//! @param right_count eigenvalues less than \a right
//! @param precision desired precision for eigenvalues
////////////////////////////////////////////////////////////////////////////////
template<class S, class T>
__device__
void
storeInterval(unsigned int addr,
float *s_left, float *s_right,
T *s_left_count, T *s_right_count,
float left, float right,
S left_count, S right_count,
float precision)
{
s_left_count[addr] = left_count;
s_right_count[addr] = right_count;
// check if interval converged
float t0 = abs(right - left);
float t1 = max(abs(left), abs(right)) * precision;
if (t0 <= max(MIN_ABS_INTERVAL, t1))
{
// compute mid point
float lambda = computeMidpoint(left, right);
// mark as converged
s_left[addr] = lambda;
s_right[addr] = lambda;
}
else
{
// store current limits
s_left[addr] = left;
s_right[addr] = right;
}
}
////////////////////////////////////////////////////////////////////////////////
//! Compute number of eigenvalues that are smaller than x given a symmetric,
//! real, and tridiagonal matrix
//! @param g_d diagonal elements stored in global memory
//! @param g_s superdiagonal elements stored in global memory
//! @param n size of matrix
//! @param x value for which the number of eigenvalues that are smaller is
//! sought
//! @param tid thread identified (e.g. threadIdx.x or gtid)
//! @param num_intervals_active number of active intervals / threads that
//! currently process an interval
//! @param s_d scratch space to store diagonal entries of the tridiagonal
//! matrix in shared memory
//! @param s_s scratch space to store superdiagonal entries of the tridiagonal
//! matrix in shared memory
//! @param converged flag if the current thread is already converged (that
//! is count does not have to be computed)
////////////////////////////////////////////////////////////////////////////////
__device__
inline unsigned int
computeNumSmallerEigenvals(float *g_d, float *g_s, const unsigned int n,
const float x,
const unsigned int tid,
const unsigned int num_intervals_active,
float *s_d, float *s_s,
unsigned int converged
)
{
float delta = 1.0f;
unsigned int count = 0;
__syncthreads();
// read data into shared memory
if (threadIdx.x < n)
{
s_d[threadIdx.x] = *(g_d + threadIdx.x);
s_s[threadIdx.x] = *(g_s + threadIdx.x - 1);
}
__syncthreads();
// perform loop only for active threads
if ((tid < num_intervals_active) && (0 == converged))
{
// perform (optimized) Gaussian elimination to determine the number
// of eigenvalues that are smaller than x
for (unsigned int k = 0; k < n; ++k)
{
delta = s_d[k] - x - (s_s[k] * s_s[k]) / delta;
count += (delta < 0) ? 1 : 0;
}
} // end if thread currently processing an interval
return count;
}
////////////////////////////////////////////////////////////////////////////////
//! Compute number of eigenvalues that are smaller than x given a symmetric,
//! real, and tridiagonal matrix
//! @param g_d diagonal elements stored in global memory
//! @param g_s superdiagonal elements stored in global memory
//! @param n size of matrix
//! @param x value for which the number of eigenvalues that are smaller is
//! sought
//! @param tid thread identified (e.g. threadIdx.x or gtid)
//! @param num_intervals_active number of active intervals / threads that
//! currently process an interval
//! @param s_d scratch space to store diagonal entries of the tridiagonal
//! matrix in shared memory
//! @param s_s scratch space to store superdiagonal entries of the tridiagonal
//! matrix in shared memory
//! @param converged flag if the current thread is already converged (that
//! is count does not have to be computed)
////////////////////////////////////////////////////////////////////////////////
__device__
inline unsigned int
computeNumSmallerEigenvalsLarge(float *g_d, float *g_s, const unsigned int n,
const float x,
const unsigned int tid,
const unsigned int num_intervals_active,
float *s_d, float *s_s,
unsigned int converged
)
{
float delta = 1.0f;
unsigned int count = 0;
unsigned int rem = n;
// do until whole diagonal and superdiagonal has been loaded and processed
for (unsigned int i = 0; i < n; i += blockDim.x)
{
__syncthreads();
// read new chunk of data into shared memory
if ((i + threadIdx.x) < n)
{
s_d[threadIdx.x] = *(g_d + i + threadIdx.x);
s_s[threadIdx.x] = *(g_s + i + threadIdx.x - 1);
}
__syncthreads();
if (tid < num_intervals_active)
{
// perform (optimized) Gaussian elimination to determine the number
// of eigenvalues that are smaller than x
for (unsigned int k = 0; k < min(rem,blockDim.x); ++k)
{
delta = s_d[k] - x - (s_s[k] * s_s[k]) / delta;
// delta = (abs( delta) < (1.0e-10)) ? -(1.0e-10) : delta;
count += (delta < 0) ? 1 : 0;
}
} // end if thread currently processing an interval
rem -= blockDim.x;
}
return count;
}
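// Worked example of the chunked processing above (illustrative numbers): with
// n = 1000 and blockDim.x = 256 the outer loop runs four times; the first
// three chunks each process 256 diagonal/superdiagonal entries and the last
// one processes min(rem, 256) = 232, with rem reduced by 256 after every pass.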
////////////////////////////////////////////////////////////////////////////////
//! Store all non-empty intervals resulting from the subdivision of the interval
//! currently processed by the thread
//! @param addr base address for storing intervals
//! @param num_threads_active number of threads / intervals in current sweep
//! @param s_left shared memory storage for left interval limits
//! @param s_right shared memory storage for right interval limits
//! @param s_left_count shared memory storage for number of eigenvalues less
//! than left interval limits
//! @param s_right_count shared memory storage for number of eigenvalues less
//! than right interval limits
//! @param left lower limit of interval
//! @param mid midpoint of interval
//! @param right upper limit of interval
//! @param left_count eigenvalues less than \a left
//! @param mid_count eigenvalues less than \a mid
//! @param right_count eigenvalues less than \a right
//! @param precision desired precision for eigenvalues
//! @param compact_second_chunk shared mem flag if second chunk is used and
//! ergo requires compaction
//! @param s_compaction_list_exc helper array for stream compaction,
//! s_compaction_list_exc[tid] = 1 when the
//! thread generated two child intervals
//! @param is_active_second marks whether the thread has a second non-empty child interval
////////////////////////////////////////////////////////////////////////////////
template<class S, class T>
__device__
void
storeNonEmptyIntervals(unsigned int addr,
const unsigned int num_threads_active,
float *s_left, float *s_right,
T *s_left_count, T *s_right_count,
float left, float mid, float right,
const S left_count,
const S mid_count,
const S right_count,
float precision,
unsigned int &compact_second_chunk,
T *s_compaction_list_exc,
unsigned int &is_active_second)
{
// check if both child intervals are valid
if ((left_count != mid_count) && (mid_count != right_count))
{
// store the left interval
storeInterval(addr, s_left, s_right, s_left_count, s_right_count,
left, mid, left_count, mid_count, precision);
// mark that a second interval has been generated, only stored after
// stream compaction of second chunk
is_active_second = 1;
s_compaction_list_exc[threadIdx.x] = 1;
compact_second_chunk = 1;
}
else
{
// only one non-empty child interval
// mark that no second child
is_active_second = 0;
s_compaction_list_exc[threadIdx.x] = 0;
// store the one valid child interval
if (left_count != mid_count)
{
storeInterval(addr, s_left, s_right, s_left_count, s_right_count,
left, mid, left_count, mid_count, precision);
}
else
{
storeInterval(addr, s_left, s_right, s_left_count, s_right_count,
mid, right, mid_count, right_count, precision);
}
}
}
////////////////////////////////////////////////////////////////////////////////
//! Create indices for compaction, that is process \a s_compaction_list_exc
//! which is 1 for intervals that generated a second child and 0 otherwise
//! and create for each of the non-zero elements the index where the new
//! interval belongs to in a compact representation of all generated second
//! children
//! @param s_compaction_list_exc list containing the flags which threads
//! generated two children
//! @param num_threads_compaction number of threads to employ for compaction
////////////////////////////////////////////////////////////////////////////////
template<class T>
__device__
void
createIndicesCompaction(T *s_compaction_list_exc,
unsigned int num_threads_compaction)
{
unsigned int offset = 1;
const unsigned int tid = threadIdx.x;
// higher levels of scan tree
for (int d = (num_threads_compaction >> 1); d > 0; d >>= 1)
{
__syncthreads();
if (tid < d)
{
unsigned int ai = offset*(2*tid+1)-1;
unsigned int bi = offset*(2*tid+2)-1;
s_compaction_list_exc[bi] = s_compaction_list_exc[bi]
+ s_compaction_list_exc[ai];
}
offset <<= 1;
}
// traverse down tree: first down to level 2 across
for (int d = 2; d < num_threads_compaction; d <<= 1)
{
offset >>= 1;
__syncthreads();
if (tid < (d-1))
{
unsigned int ai = offset*(tid+1) - 1;
unsigned int bi = ai + (offset >> 1);
s_compaction_list_exc[bi] = s_compaction_list_exc[bi]
+ s_compaction_list_exc[ai];
}
}
__syncthreads();
}
///////////////////////////////////////////////////////////////////////////////
//! Perform stream compaction for second child intervals
//! @param s_left shared
//! @param s_left shared memory storage for left interval limits
//! @param s_right shared memory storage for right interval limits
//! @param s_left_count shared memory storage for number of eigenvalues less
//! than left interval limits
//! @param s_right_count shared memory storage for number of eigenvalues less
//! than right interval limits
//! @param mid midpoint of current interval (left of new interval)
//! @param right upper limit of interval
//! @param mid_count eigenvalues less than \a mid
//! @param s_compaction_list list containing the indices where the data has
//! to be stored
//! @param num_threads_active number of active threads / intervals
//! @param is_active_second marks whether the thread has a second non-empty child interval
///////////////////////////////////////////////////////////////////////////////
template<class T>
__device__
void
compactIntervals(float *s_left, float *s_right,
T *s_left_count, T *s_right_count,
float mid, float right,
unsigned int mid_count, unsigned int right_count,
T *s_compaction_list,
unsigned int num_threads_active,
unsigned int is_active_second)
{
const unsigned int tid = threadIdx.x;
// perform compaction / copy data for all threads where the second
// child is not dead
if ((tid < num_threads_active) && (1 == is_active_second))
{
unsigned int addr_w = num_threads_active + s_compaction_list[tid];
s_left[addr_w] = mid;
s_right[addr_w] = right;
s_left_count[addr_w] = mid_count;
s_right_count[addr_w] = right_count;
}
}
///////////////////////////////////////////////////////////////////////////////
//! Store intervals that have already converged (w.r.t. the desired precision),
//! duplicating intervals that contain multiple eigenvalues
//! @param s_left shared memory storage for left interval limits
//! @param s_right shared memory storage for right interval limits
//! @param s_left_count shared memory storage for number of eigenvalues less
//! than left interval limits
//! @param s_right_count shared memory storage for number of eigenvalues less
//! than right interval limits
//! @param left lower limit of interval
//! @param mid midpoint of interval (updated if split is necessary)
//! @param right upper limit of interval
//! @param left_count eigenvalues less than \a left
//! @param mid_count eigenvalues less than \a mid
//! @param right_count eigenvalues less than \a right
//! @param s_compaction_list_exc helper array for stream compaction, updated
//! at tid if split is necessary
//! @param compact_second_chunk shared mem flag if second chunk is used and
//! ergo requires compaction
//! @param num_threads_active number of active threads / intervals
///////////////////////////////////////////////////////////////////////////////
template<class T, class S>
__device__
void
storeIntervalConverged(float *s_left, float *s_right,
T *s_left_count, T *s_right_count,
float &left, float &mid, float &right,
S &left_count, S &mid_count, S &right_count,
T *s_compaction_list_exc,
unsigned int &compact_second_chunk,
const unsigned int num_threads_active)
{
const unsigned int tid = threadIdx.x;
const unsigned int multiplicity = right_count - left_count;
// check multiplicity of eigenvalue
if (1 == multiplicity)
{
// just re-store intervals, simple eigenvalue
s_left[tid] = left;
s_right[tid] = right;
s_left_count[tid] = left_count;
s_right_count[tid] = right_count;
// mark that no second child / clear
s_right_count[tid + num_threads_active] = 0;
s_compaction_list_exc[tid] = 0;
}
else
{
// number of eigenvalues that are less than mid after the split
mid_count = left_count + (multiplicity >> 1);
// store left interval
s_left[tid] = left;
s_right[tid] = right;
s_left_count[tid] = left_count;
s_right_count[tid] = mid_count;
mid = left;
// mark that second child interval exists
s_right_count[tid + num_threads_active] = right_count;
s_compaction_list_exc[tid] = 1;
compact_second_chunk = 1;
}
}
template<class T, class S>
__device__
void
storeIntervalConverged(float *s_left, float *s_right,
T *s_left_count, T *s_right_count,
float &left, float &mid, float &right,
S &left_count, S &mid_count, S &right_count,
T *s_compaction_list_exc,
unsigned int &compact_second_chunk,
const unsigned int num_threads_active,
unsigned int &is_active_second)
{
const unsigned int tid = threadIdx.x;
const unsigned int multiplicity = right_count - left_count;
// check multiplicity of eigenvalue
if (1 == multiplicity)
{
// just re-store intervals, simple eigenvalue
s_left[tid] = left;
s_right[tid] = right;
s_left_count[tid] = left_count;
s_right_count[tid] = right_count;
// mark that no second child / clear
is_active_second = 0;
s_compaction_list_exc[tid] = 0;
}
else
{
// number of eigenvalues that are less than mid after the split
mid_count = left_count + (multiplicity >> 1);
// store left interval
s_left[tid] = left;
s_right[tid] = right;
s_left_count[tid] = left_count;
s_right_count[tid] = mid_count;
mid = left;
// mark that second child interval exists
is_active_second = 1;
s_compaction_list_exc[tid] = 1;
compact_second_chunk = 1;
}
}
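// Worked example (illustrative): for a converged point enclosing several
// eigenvalues, the interval is duplicated and the eigenvalue indices are split
// roughly in half. Assuming left_count == 3 and right_count == 8
// (multiplicity 5), mid_count becomes 3 + 2 = 5: the interval stored here
// keeps eigenvalues 4..5, while the second child produced via the compaction
// path keeps eigenvalues 6..8.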
///////////////////////////////////////////////////////////////////////////////
//! Subdivide interval if active and not already converged
//! @param tid id of thread
//! @param s_left shared memory storage for left interval limits
//! @param s_right shared memory storage for right interval limits
//! @param s_left_count shared memory storage for number of eigenvalues less
//! than left interval limits
//! @param s_right_count shared memory storage for number of eigenvalues less
//! than right interval limits
//! @param num_threads_active number of active threads in warp
//! @param left lower limit of interval
//! @param right upper limit of interval
//! @param left_count eigenvalues less than \a left
//! @param right_count eigenvalues less than \a right
//! @param all_threads_converged shared memory flag if all threads are
//! converged
///////////////////////////////////////////////////////////////////////////////
template<class T>
__device__
void
subdivideActiveInterval(const unsigned int tid,
float *s_left, float *s_right,
T *s_left_count, T *s_right_count,
const unsigned int num_threads_active,
float &left, float &right,
unsigned int &left_count, unsigned int &right_count,
float &mid, unsigned int &all_threads_converged)
{
// for all active threads
if (tid < num_threads_active)
{
left = s_left[tid];
right = s_right[tid];
left_count = s_left_count[tid];
right_count = s_right_count[tid];
// check if thread already converged
if (left != right)
{
mid = computeMidpoint(left, right);
all_threads_converged = 0;
}
else if ((right_count - left_count) > 1)
{
// mark as not converged if multiple eigenvalues enclosed
// duplicate interval in storeIntervalConverged()
all_threads_converged = 0;
}
} // end for all active threads
}
#endif // #ifndef _BISECT_UTIL_H_
|
06c337c5101b03175bd8d85066ded2da8e00c270.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <thrust/device_vector.h>
#include <thrust/fill.h>
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/operators/graph_send_recv_op.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#include "paddle/fluid/platform/place.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
template <typename T, typename IndexT>
struct GraphSendRecvSumCUDAFunctor {
DEVICE inline void operator()(const T* params, T* output, const IndexT& in_i,
const IndexT& out_i) {
paddle::platform::CudaAtomicAdd(output + out_i, *(params + in_i));
}
};
template <typename T, typename IndexT>
struct GraphSendRecvMaxCUDAFunctor {
DEVICE inline void operator()(const T* params, T* output, const IndexT& in_i,
const IndexT& out_i) {
paddle::platform::CudaAtomicMax(output + out_i, *(params + in_i));
}
};
template <typename T, typename IndexT>
struct GraphSendRecvMinCUDAFunctor {
DEVICE inline void operator()(const T* params, T* output, const IndexT& in_i,
const IndexT& out_i) {
paddle::platform::CudaAtomicMin(output + out_i, *(params + in_i));
}
};
template <typename T, typename IndexT, typename Functor>
__global__ void GraphSendRecvCUDAKernel(const T* params,
const IndexT* src_indices,
const IndexT* dst_indices, T* output,
size_t index_size, size_t slice_size,
Functor functor) {
CUDA_KERNEL_LOOP_TYPE(i, index_size * slice_size, int64_t) {
int64_t indices_i = i / slice_size;
int64_t slice_i = i - indices_i * slice_size;
IndexT src_i = src_indices[indices_i];
IndexT dst_i = dst_indices[indices_i];
int64_t in_i = src_i * slice_size + slice_i;
int64_t out_i = dst_i * slice_size + slice_i;
functor(params, output, in_i, out_i);
}
}
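// Index arithmetic, illustrated (not part of the original source): with
// slice_size == 4 and index_size == 3, the flattened loop index i == 10 maps
// to edge index indices_i == 2 and feature offset slice_i == 2, so the kernel
// reads params[src_indices[2] * 4 + 2] and accumulates into
// output[dst_indices[2] * 4 + 2] through the functor's atomic operation.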
// For max
template <typename T>
__global__ void InputResetMaxCUDAKernel(T* output, size_t input_size,
size_t slice_size) {
CUDA_KERNEL_LOOP_TYPE(i, input_size * slice_size, int64_t) {
if (*(output + i) == std::numeric_limits<T>::min()) {
*(output + i) = 0;
}
}
}
// For min
template <typename T>
__global__ void InputResetMinCUDAKernel(T* output, size_t input_size,
size_t slice_size) {
CUDA_KERNEL_LOOP_TYPE(i, input_size * slice_size, int64_t) {
if (*(output + i) == std::numeric_limits<T>::max()) {
*(output + i) = 0;
}
}
}
// Get dst_count
template <typename T, typename IndexT>
__global__ void ComputeCountCUDAKernel(int* count, const IndexT* dst_indices,
size_t index_size) {
CUDA_KERNEL_LOOP_TYPE(i, index_size, int64_t) {
IndexT dst_i = dst_indices[i];
paddle::platform::CudaAtomicAdd(count + dst_i, 1);
}
}
// For forward mean
template <typename T>
__global__ void ManipulateMeanCUDAKernel(T* output, int* count,
size_t input_size, size_t slice_size) {
CUDA_KERNEL_LOOP_TYPE(i, input_size * slice_size, int64_t) {
int64_t c_index = i / slice_size;
if (*(count + c_index) > 1) {
*(output + i) = *(output + i) / *(count + c_index);
}
}
}
// For backward mean
template <typename T, typename IndexT>
__global__ void ManipulateMeanGradCUDAKernel(
const T* params, const IndexT* src_indices, const IndexT* dst_indices,
T* output, size_t index_size, size_t slice_size, const int* dst_count) {
CUDA_KERNEL_LOOP_TYPE(i, index_size * slice_size, int64_t) {
int64_t indices_i = i / slice_size;
int64_t slice_i = i - indices_i * slice_size;
IndexT src_i = src_indices[indices_i];
IndexT dst_i = dst_indices[indices_i];
int64_t in_i = src_i * slice_size + slice_i;
int64_t out_i = dst_i * slice_size + slice_i;
paddle::platform::CudaAtomicAdd(output + out_i,
*(params + in_i) / dst_count[src_i]);
}
}
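// Note on the mean backward pass: the grad kernel class further below appears
// to swap Src_index and Dst_index before calling the launch helper, so
// src_indices here are the forward-pass destinations; dividing by
// dst_count[src_i] therefore spreads each upstream gradient by the same count
// that was used to average the forward output.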
// For backward min and max
template <typename T, typename IndexT>
__global__ void ManipulateMinMaxGradCUDAKernel(
const T* params, const IndexT* src_indices, const IndexT* dst_indices,
T* output, size_t index_size, size_t slice_size, const T* ptr_input,
const T* ptr_output) {
CUDA_KERNEL_LOOP_TYPE(i, index_size * slice_size, int64_t) {
int64_t indices_i = i / slice_size;
int64_t slice_i = i - indices_i * slice_size;
IndexT src_i = src_indices[indices_i];
IndexT dst_i = dst_indices[indices_i];
int64_t in_i = src_i * slice_size + slice_i;
int64_t out_i = dst_i * slice_size + slice_i;
paddle::platform::CudaAtomicAdd(
output + out_i,
*(params + in_i) * (*(ptr_input + out_i) == *(ptr_output + in_i)));
}
}
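// Note on the min/max backward pass: the multiplication by the equality test
// routes gradient only to source elements whose value matches the selected
// forward output (the arg-min/arg-max positions); if several sources tie,
// each tying element receives the full upstream gradient.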
template <typename DeviceContext, typename T, typename IndexT>
void GraphSendRecvOpCUDAKernelLaunchHelper(
const framework::ExecutionContext& ctx, const Tensor& src_index,
const Tensor& dst_index) {
auto* X = ctx.Input<Tensor>("X");
auto* Y = ctx.Output<Tensor>("Out");
std::string pool_type = ctx.Attr<std::string>("pool_type");
const int& index_size = src_index.dims()[0];
T* p_output = Y->mutable_data<T>(ctx.GetPlace());
const auto& src_dims = X->dims();
int64_t memset_size = 1;
for (int i = 0; i < src_dims.size(); ++i) {
memset_size *= src_dims[i];
}
const size_t& memset_bytes = memset_size * sizeof(T);
if (pool_type == "SUM" || pool_type == "MEAN") {
#ifdef PADDLE_WITH_HIP
hipMemset(p_output, 0, memset_bytes);
#else
hipMemset(p_output, 0, memset_bytes);
#endif
} else if (pool_type == "MAX") {
thrust::device_ptr<T> p_output_ptr(p_output);
thrust::fill(thrust::device, p_output_ptr, p_output_ptr + memset_size,
std::numeric_limits<T>::min());
} else if (pool_type == "MIN") {
thrust::device_ptr<T> p_output_ptr(p_output);
thrust::fill(thrust::device, p_output_ptr, p_output_ptr + memset_size,
std::numeric_limits<T>::max());
}
if (index_size == 0) return;
int64_t slice_size = 1;
for (int i = 1; i < src_dims.size(); ++i) {
slice_size *= src_dims[i];
}
const T* p_src = X->data<T>();
const IndexT* s_index = src_index.data<IndexT>();
const IndexT* d_index = dst_index.data<IndexT>();
#ifdef PADDLE_WITH_HIP
int block = 256;
#else
int block = 1024;
#endif
int64_t n = slice_size * index_size;
const auto& dev_ctx = ctx.cuda_device_context();
int64_t max_grid_dimx = dev_ctx.GetCUDAMaxGridDimSize().x;
int64_t grid_tmp = (n + block - 1) / block;
int64_t grid = grid_tmp < max_grid_dimx ? grid_tmp : max_grid_dimx;
int64_t input_size = src_dims[0];
if (pool_type == "SUM") {
GraphSendRecvSumCUDAFunctor<T, IndexT> functor;
hipLaunchKernelGGL(( GraphSendRecvCUDAKernel<T, IndexT,
GraphSendRecvSumCUDAFunctor<T, IndexT>>),
dim3(grid), dim3(block), 0, reinterpret_cast<const platform::CUDADeviceContext&>(
ctx.device_context())
.stream(), p_src, s_index, d_index, p_output,
index_size, slice_size, functor);
} else if (pool_type == "MAX") {
GraphSendRecvMaxCUDAFunctor<T, IndexT> functor;
hipLaunchKernelGGL(( GraphSendRecvCUDAKernel<T, IndexT,
GraphSendRecvMaxCUDAFunctor<T, IndexT>>),
dim3(grid), dim3(block), 0, reinterpret_cast<const platform::CUDADeviceContext&>(
ctx.device_context())
.stream(), p_src, s_index, d_index, p_output,
index_size, slice_size, functor);
int64_t grid_max_tmp = (input_size * slice_size + block - 1) / block;
int64_t grid_max =
grid_max_tmp < max_grid_dimx ? grid_max_tmp : max_grid_dimx;
hipLaunchKernelGGL(( InputResetMaxCUDAKernel<
T>), dim3(grid_max), dim3(block), 0,
reinterpret_cast<const platform::CUDADeviceContext&>(
ctx.device_context())
.stream(), p_output, input_size, slice_size);
} else if (pool_type == "MIN") {
GraphSendRecvMinCUDAFunctor<T, IndexT> functor;
hipLaunchKernelGGL(( GraphSendRecvCUDAKernel<T, IndexT,
GraphSendRecvMinCUDAFunctor<T, IndexT>>),
dim3(grid), dim3(block), 0, reinterpret_cast<const platform::CUDADeviceContext&>(
ctx.device_context())
.stream(), p_src, s_index, d_index, p_output,
index_size, slice_size, functor);
int64_t grid_min_tmp = (input_size * slice_size + block - 1) / block;
int64_t grid_min =
grid_min_tmp < max_grid_dimx ? grid_min_tmp : max_grid_dimx;
hipLaunchKernelGGL(( InputResetMinCUDAKernel<
T>), dim3(grid_min), dim3(block), 0,
reinterpret_cast<const platform::CUDADeviceContext&>(
ctx.device_context())
.stream(), p_output, input_size, slice_size);
} else if (pool_type == "MEAN") {
GraphSendRecvSumCUDAFunctor<T, IndexT> functor;
hipLaunchKernelGGL(( GraphSendRecvCUDAKernel<T, IndexT,
GraphSendRecvSumCUDAFunctor<T, IndexT>>),
dim3(grid), dim3(block), 0, reinterpret_cast<const platform::CUDADeviceContext&>(
ctx.device_context())
.stream(), p_src, s_index, d_index, p_output,
index_size, slice_size, functor);
auto* dst_count = ctx.Output<Tensor>("Dst_count");
int* p_dst_count = dst_count->mutable_data<int>(ctx.GetPlace());
#ifdef PADDLE_WITH_HIP
hipMemset(p_dst_count, 0, input_size * sizeof(int));
#else
hipMemset(p_dst_count, 0, input_size * sizeof(int));
#endif
int64_t grid_count = (index_size + block - 1) / block;
hipLaunchKernelGGL(( ComputeCountCUDAKernel<
T, IndexT>), dim3(grid_count), dim3(block), 0,
reinterpret_cast<const platform::CUDADeviceContext&>(
ctx.device_context())
.stream(), p_dst_count, d_index, index_size);
int64_t grid_mean_tmp = (input_size * slice_size + block - 1) / block;
int64_t grid_mean =
grid_mean_tmp < max_grid_dimx ? grid_mean_tmp : max_grid_dimx;
hipLaunchKernelGGL(( ManipulateMeanCUDAKernel<
T>), dim3(grid_mean), dim3(block), 0,
reinterpret_cast<const platform::CUDADeviceContext&>(
ctx.device_context())
.stream(), p_output, p_dst_count, input_size, slice_size);
}
}
template <typename DeviceContext, typename T, typename IndexT>
void GraphSendRecvGradOpCUDAKernelLaunchHelper(
const framework::ExecutionContext& ctx, const Tensor& src_index,
const Tensor& dst_index) {
auto* X = ctx.Input<Tensor>(framework::GradVarName("Out"));
auto* Y = ctx.Output<Tensor>(framework::GradVarName("X"));
std::string pool_type = ctx.Attr<std::string>("pool_type");
const int& index_size = src_index.dims()[0];
T* p_output = Y->mutable_data<T>(ctx.GetPlace());
const auto& src_dims = X->dims();
int64_t memset_size = 1;
for (int i = 0; i < src_dims.size(); ++i) {
memset_size *= src_dims[i];
}
const size_t& memset_bytes = memset_size * sizeof(T);
#ifdef PADDLE_WITH_HIP
hipMemset(p_output, 0, memset_bytes);
#else
hipMemset(p_output, 0, memset_bytes);
#endif
if (index_size == 0) return;
int64_t slice_size = 1;
for (int i = 1; i < src_dims.size(); ++i) {
slice_size *= src_dims[i];
}
const T* p_src = X->data<T>();
const IndexT* s_index = src_index.data<IndexT>();
const IndexT* d_index = dst_index.data<IndexT>();
#ifdef PADDLE_WITH_HIP
int block = 256;
#else
int block = 1024;
#endif
int64_t n = slice_size * index_size;
const auto& dev_ctx = ctx.cuda_device_context();
int64_t max_grid_dimx = dev_ctx.GetCUDAMaxGridDimSize().x;
int64_t grid_tmp = (n + block - 1) / block;
int64_t grid = grid_tmp < max_grid_dimx ? grid_tmp : max_grid_dimx;
int64_t input_size = src_dims[0];
if (pool_type == "SUM") {
GraphSendRecvSumCUDAFunctor<T, IndexT> functor;
hipLaunchKernelGGL(( GraphSendRecvCUDAKernel<T, IndexT,
GraphSendRecvSumCUDAFunctor<T, IndexT>>),
dim3(grid), dim3(block), 0, reinterpret_cast<const platform::CUDADeviceContext&>(
ctx.device_context())
.stream(), p_src, s_index, d_index, p_output,
index_size, slice_size, functor);
} else if (pool_type == "MEAN") {
auto* dst_count = ctx.Input<Tensor>("Dst_count");
const int* s_count = dst_count->data<int>();
hipLaunchKernelGGL(( ManipulateMeanGradCUDAKernel<T, IndexT>),
dim3(grid), dim3(block), 0, reinterpret_cast<const platform::CUDADeviceContext&>(
ctx.device_context())
.stream(), p_src, s_index, d_index, p_output,
index_size, slice_size, s_count);
} else if (pool_type == "MAX" || pool_type == "MIN") {
auto* input = ctx.Input<Tensor>("X");
auto* output = ctx.Input<Tensor>("Out");
const T* ptr_input = input->data<T>();
const T* ptr_output = output->data<T>();
hipLaunchKernelGGL(( ManipulateMinMaxGradCUDAKernel<T, IndexT>),
dim3(grid), dim3(block), 0, reinterpret_cast<const platform::CUDADeviceContext&>(
ctx.device_context())
.stream(), p_src, s_index, d_index, p_output,
index_size, slice_size, ptr_input,
ptr_output);
}
}
template <typename DeviceContext, typename T>
class GraphSendRecvOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* src_index = ctx.Input<Tensor>("Src_index");
auto* dst_index = ctx.Input<Tensor>("Dst_index");
auto index_type = src_index->type();
if (index_type == framework::proto::VarType::INT32) {
GraphSendRecvOpCUDAKernelLaunchHelper<DeviceContext, T, int>(
ctx, *src_index, *dst_index);
} else if (index_type == framework::proto::VarType::INT64) {
GraphSendRecvOpCUDAKernelLaunchHelper<DeviceContext, T, int64_t>(
ctx, *src_index, *dst_index);
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
"Unsupported Src_index or Dst_index dtype, expected int, int64, but "
"got %s.",
index_type));
}
}
};
template <typename DeviceContext, typename T>
class GraphSendRecvGradOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* src_index = ctx.Input<Tensor>("Dst_index");
auto* dst_index = ctx.Input<Tensor>("Src_index");
auto index_type = src_index->type();
if (index_type == framework::proto::VarType::INT32) {
GraphSendRecvGradOpCUDAKernelLaunchHelper<DeviceContext, T, int>(
ctx, *src_index, *dst_index);
} else if (index_type == framework::proto::VarType::INT64) {
GraphSendRecvGradOpCUDAKernelLaunchHelper<DeviceContext, T, int64_t>(
ctx, *src_index, *dst_index);
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
"Unsupported Src_index or Dst_index dtype, expected int, int64, but "
"got %s.",
index_type));
}
}
};
} // namespace operators
} // namespace paddle
using CUDA = paddle::platform::CUDADeviceContext;
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(graph_send_recv,
ops::GraphSendRecvOpCUDAKernel<CUDA, float>,
ops::GraphSendRecvOpCUDAKernel<CUDA, double>,
ops::GraphSendRecvOpCUDAKernel<CUDA, int>,
ops::GraphSendRecvOpCUDAKernel<CUDA, int64_t>);
REGISTER_OP_CUDA_KERNEL(graph_send_recv_grad,
ops::GraphSendRecvGradOpCUDAKernel<CUDA, float>,
ops::GraphSendRecvGradOpCUDAKernel<CUDA, double>,
ops::GraphSendRecvGradOpCUDAKernel<CUDA, int>,
ops::GraphSendRecvGradOpCUDAKernel<CUDA, int64_t>);
| 06c337c5101b03175bd8d85066ded2da8e00c270.cu | /* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <thrust/device_vector.h>
#include <thrust/fill.h>
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/operators/graph_send_recv_op.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#include "paddle/fluid/platform/place.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
template <typename T, typename IndexT>
struct GraphSendRecvSumCUDAFunctor {
DEVICE inline void operator()(const T* params, T* output, const IndexT& in_i,
const IndexT& out_i) {
paddle::platform::CudaAtomicAdd(output + out_i, *(params + in_i));
}
};
template <typename T, typename IndexT>
struct GraphSendRecvMaxCUDAFunctor {
DEVICE inline void operator()(const T* params, T* output, const IndexT& in_i,
const IndexT& out_i) {
paddle::platform::CudaAtomicMax(output + out_i, *(params + in_i));
}
};
template <typename T, typename IndexT>
struct GraphSendRecvMinCUDAFunctor {
DEVICE inline void operator()(const T* params, T* output, const IndexT& in_i,
const IndexT& out_i) {
paddle::platform::CudaAtomicMin(output + out_i, *(params + in_i));
}
};
template <typename T, typename IndexT, typename Functor>
__global__ void GraphSendRecvCUDAKernel(const T* params,
const IndexT* src_indices,
const IndexT* dst_indices, T* output,
size_t index_size, size_t slice_size,
Functor functor) {
CUDA_KERNEL_LOOP_TYPE(i, index_size * slice_size, int64_t) {
int64_t indices_i = i / slice_size;
int64_t slice_i = i - indices_i * slice_size;
IndexT src_i = src_indices[indices_i];
IndexT dst_i = dst_indices[indices_i];
int64_t in_i = src_i * slice_size + slice_i;
int64_t out_i = dst_i * slice_size + slice_i;
functor(params, output, in_i, out_i);
}
}
// For max
template <typename T>
__global__ void InputResetMaxCUDAKernel(T* output, size_t input_size,
size_t slice_size) {
CUDA_KERNEL_LOOP_TYPE(i, input_size * slice_size, int64_t) {
if (*(output + i) == std::numeric_limits<T>::min()) {
*(output + i) = 0;
}
}
}
// For min
template <typename T>
__global__ void InputResetMinCUDAKernel(T* output, size_t input_size,
size_t slice_size) {
CUDA_KERNEL_LOOP_TYPE(i, input_size * slice_size, int64_t) {
if (*(output + i) == std::numeric_limits<T>::max()) {
*(output + i) = 0;
}
}
}
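// Note: these reset kernels pair with the thrust::fill initialization in the
// launch helper below -- output slots that never received a scatter still hold
// the numeric_limits sentinel after the main kernel runs, and are rewritten to
// 0 so untouched rows read as zero instead of the lowest/highest representable
// value.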
// Get dst_count
template <typename T, typename IndexT>
__global__ void ComputeCountCUDAKernel(int* count, const IndexT* dst_indices,
size_t index_size) {
CUDA_KERNEL_LOOP_TYPE(i, index_size, int64_t) {
IndexT dst_i = dst_indices[i];
paddle::platform::CudaAtomicAdd(count + dst_i, 1);
}
}
// For forward mean
template <typename T>
__global__ void ManipulateMeanCUDAKernel(T* output, int* count,
size_t input_size, size_t slice_size) {
CUDA_KERNEL_LOOP_TYPE(i, input_size * slice_size, int64_t) {
int64_t c_index = i / slice_size;
if (*(count + c_index) > 1) {
*(output + i) = *(output + i) / *(count + c_index);
}
}
}
// For backward mean
template <typename T, typename IndexT>
__global__ void ManipulateMeanGradCUDAKernel(
const T* params, const IndexT* src_indices, const IndexT* dst_indices,
T* output, size_t index_size, size_t slice_size, const int* dst_count) {
CUDA_KERNEL_LOOP_TYPE(i, index_size * slice_size, int64_t) {
int64_t indices_i = i / slice_size;
int64_t slice_i = i - indices_i * slice_size;
IndexT src_i = src_indices[indices_i];
IndexT dst_i = dst_indices[indices_i];
int64_t in_i = src_i * slice_size + slice_i;
int64_t out_i = dst_i * slice_size + slice_i;
paddle::platform::CudaAtomicAdd(output + out_i,
*(params + in_i) / dst_count[src_i]);
}
}
// For backward min and max
template <typename T, typename IndexT>
__global__ void ManipulateMinMaxGradCUDAKernel(
const T* params, const IndexT* src_indices, const IndexT* dst_indices,
T* output, size_t index_size, size_t slice_size, const T* ptr_input,
const T* ptr_output) {
CUDA_KERNEL_LOOP_TYPE(i, index_size * slice_size, int64_t) {
int64_t indices_i = i / slice_size;
int64_t slice_i = i - indices_i * slice_size;
IndexT src_i = src_indices[indices_i];
IndexT dst_i = dst_indices[indices_i];
int64_t in_i = src_i * slice_size + slice_i;
int64_t out_i = dst_i * slice_size + slice_i;
paddle::platform::CudaAtomicAdd(
output + out_i,
*(params + in_i) * (*(ptr_input + out_i) == *(ptr_output + in_i)));
}
}
template <typename DeviceContext, typename T, typename IndexT>
void GraphSendRecvOpCUDAKernelLaunchHelper(
const framework::ExecutionContext& ctx, const Tensor& src_index,
const Tensor& dst_index) {
auto* X = ctx.Input<Tensor>("X");
auto* Y = ctx.Output<Tensor>("Out");
std::string pool_type = ctx.Attr<std::string>("pool_type");
const int& index_size = src_index.dims()[0];
T* p_output = Y->mutable_data<T>(ctx.GetPlace());
const auto& src_dims = X->dims();
int64_t memset_size = 1;
for (int i = 0; i < src_dims.size(); ++i) {
memset_size *= src_dims[i];
}
const size_t& memset_bytes = memset_size * sizeof(T);
if (pool_type == "SUM" || pool_type == "MEAN") {
#ifdef PADDLE_WITH_HIP
hipMemset(p_output, 0, memset_bytes);
#else
cudaMemset(p_output, 0, memset_bytes);
#endif
} else if (pool_type == "MAX") {
thrust::device_ptr<T> p_output_ptr(p_output);
thrust::fill(thrust::device, p_output_ptr, p_output_ptr + memset_size,
std::numeric_limits<T>::min());
} else if (pool_type == "MIN") {
thrust::device_ptr<T> p_output_ptr(p_output);
thrust::fill(thrust::device, p_output_ptr, p_output_ptr + memset_size,
std::numeric_limits<T>::max());
}
if (index_size == 0) return;
int64_t slice_size = 1;
for (int i = 1; i < src_dims.size(); ++i) {
slice_size *= src_dims[i];
}
const T* p_src = X->data<T>();
const IndexT* s_index = src_index.data<IndexT>();
const IndexT* d_index = dst_index.data<IndexT>();
#ifdef PADDLE_WITH_HIP
int block = 256;
#else
int block = 1024;
#endif
int64_t n = slice_size * index_size;
const auto& dev_ctx = ctx.cuda_device_context();
int64_t max_grid_dimx = dev_ctx.GetCUDAMaxGridDimSize().x;
int64_t grid_tmp = (n + block - 1) / block;
int64_t grid = grid_tmp < max_grid_dimx ? grid_tmp : max_grid_dimx;
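  // Capping the grid at the device's max grid dimension is safe here assuming
  // CUDA_KERNEL_LOOP_TYPE expands to a grid-stride loop: when n exceeds
  // grid * block threads, each thread simply processes multiple elements.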
int64_t input_size = src_dims[0];
if (pool_type == "SUM") {
GraphSendRecvSumCUDAFunctor<T, IndexT> functor;
GraphSendRecvCUDAKernel<T, IndexT,
GraphSendRecvSumCUDAFunctor<T, IndexT>><<<
grid, block, 0, reinterpret_cast<const platform::CUDADeviceContext&>(
ctx.device_context())
.stream()>>>(p_src, s_index, d_index, p_output,
index_size, slice_size, functor);
} else if (pool_type == "MAX") {
GraphSendRecvMaxCUDAFunctor<T, IndexT> functor;
GraphSendRecvCUDAKernel<T, IndexT,
GraphSendRecvMaxCUDAFunctor<T, IndexT>><<<
grid, block, 0, reinterpret_cast<const platform::CUDADeviceContext&>(
ctx.device_context())
.stream()>>>(p_src, s_index, d_index, p_output,
index_size, slice_size, functor);
int64_t grid_max_tmp = (input_size * slice_size + block - 1) / block;
int64_t grid_max =
grid_max_tmp < max_grid_dimx ? grid_max_tmp : max_grid_dimx;
InputResetMaxCUDAKernel<
T><<<grid_max, block, 0,
reinterpret_cast<const platform::CUDADeviceContext&>(
ctx.device_context())
.stream()>>>(p_output, input_size, slice_size);
} else if (pool_type == "MIN") {
GraphSendRecvMinCUDAFunctor<T, IndexT> functor;
GraphSendRecvCUDAKernel<T, IndexT,
GraphSendRecvMinCUDAFunctor<T, IndexT>><<<
grid, block, 0, reinterpret_cast<const platform::CUDADeviceContext&>(
ctx.device_context())
.stream()>>>(p_src, s_index, d_index, p_output,
index_size, slice_size, functor);
int64_t grid_min_tmp = (input_size * slice_size + block - 1) / block;
int64_t grid_min =
grid_min_tmp < max_grid_dimx ? grid_min_tmp : max_grid_dimx;
InputResetMinCUDAKernel<
T><<<grid_min, block, 0,
reinterpret_cast<const platform::CUDADeviceContext&>(
ctx.device_context())
.stream()>>>(p_output, input_size, slice_size);
} else if (pool_type == "MEAN") {
GraphSendRecvSumCUDAFunctor<T, IndexT> functor;
GraphSendRecvCUDAKernel<T, IndexT,
GraphSendRecvSumCUDAFunctor<T, IndexT>><<<
grid, block, 0, reinterpret_cast<const platform::CUDADeviceContext&>(
ctx.device_context())
.stream()>>>(p_src, s_index, d_index, p_output,
index_size, slice_size, functor);
auto* dst_count = ctx.Output<Tensor>("Dst_count");
int* p_dst_count = dst_count->mutable_data<int>(ctx.GetPlace());
#ifdef PADDLE_WITH_HIP
hipMemset(p_dst_count, 0, input_size * sizeof(int));
#else
cudaMemset(p_dst_count, 0, input_size * sizeof(int));
#endif
int64_t grid_count = (index_size + block - 1) / block;
ComputeCountCUDAKernel<
T, IndexT><<<grid_count, block, 0,
reinterpret_cast<const platform::CUDADeviceContext&>(
ctx.device_context())
.stream()>>>(p_dst_count, d_index, index_size);
int64_t grid_mean_tmp = (input_size * slice_size + block - 1) / block;
int64_t grid_mean =
grid_mean_tmp < max_grid_dimx ? grid_mean_tmp : max_grid_dimx;
ManipulateMeanCUDAKernel<
T><<<grid_mean, block, 0,
reinterpret_cast<const platform::CUDADeviceContext&>(
ctx.device_context())
.stream()>>>(p_output, p_dst_count, input_size, slice_size);
}
}
template <typename DeviceContext, typename T, typename IndexT>
void GraphSendRecvGradOpCUDAKernelLaunchHelper(
const framework::ExecutionContext& ctx, const Tensor& src_index,
const Tensor& dst_index) {
auto* X = ctx.Input<Tensor>(framework::GradVarName("Out"));
auto* Y = ctx.Output<Tensor>(framework::GradVarName("X"));
std::string pool_type = ctx.Attr<std::string>("pool_type");
const int& index_size = src_index.dims()[0];
T* p_output = Y->mutable_data<T>(ctx.GetPlace());
const auto& src_dims = X->dims();
int64_t memset_size = 1;
for (int i = 0; i < src_dims.size(); ++i) {
memset_size *= src_dims[i];
}
const size_t& memset_bytes = memset_size * sizeof(T);
#ifdef PADDLE_WITH_HIP
hipMemset(p_output, 0, memset_bytes);
#else
cudaMemset(p_output, 0, memset_bytes);
#endif
if (index_size == 0) return;
int64_t slice_size = 1;
for (int i = 1; i < src_dims.size(); ++i) {
slice_size *= src_dims[i];
}
const T* p_src = X->data<T>();
const IndexT* s_index = src_index.data<IndexT>();
const IndexT* d_index = dst_index.data<IndexT>();
#ifdef PADDLE_WITH_HIP
int block = 256;
#else
int block = 1024;
#endif
int64_t n = slice_size * index_size;
const auto& dev_ctx = ctx.cuda_device_context();
int64_t max_grid_dimx = dev_ctx.GetCUDAMaxGridDimSize().x;
int64_t grid_tmp = (n + block - 1) / block;
int64_t grid = grid_tmp < max_grid_dimx ? grid_tmp : max_grid_dimx;
int64_t input_size = src_dims[0];
if (pool_type == "SUM") {
GraphSendRecvSumCUDAFunctor<T, IndexT> functor;
GraphSendRecvCUDAKernel<T, IndexT,
GraphSendRecvSumCUDAFunctor<T, IndexT>><<<
grid, block, 0, reinterpret_cast<const platform::CUDADeviceContext&>(
ctx.device_context())
.stream()>>>(p_src, s_index, d_index, p_output,
index_size, slice_size, functor);
} else if (pool_type == "MEAN") {
auto* dst_count = ctx.Input<Tensor>("Dst_count");
const int* s_count = dst_count->data<int>();
ManipulateMeanGradCUDAKernel<T, IndexT><<<
grid, block, 0, reinterpret_cast<const platform::CUDADeviceContext&>(
ctx.device_context())
.stream()>>>(p_src, s_index, d_index, p_output,
index_size, slice_size, s_count);
} else if (pool_type == "MAX" || pool_type == "MIN") {
auto* input = ctx.Input<Tensor>("X");
auto* output = ctx.Input<Tensor>("Out");
const T* ptr_input = input->data<T>();
const T* ptr_output = output->data<T>();
ManipulateMinMaxGradCUDAKernel<T, IndexT><<<
grid, block, 0, reinterpret_cast<const platform::CUDADeviceContext&>(
ctx.device_context())
.stream()>>>(p_src, s_index, d_index, p_output,
index_size, slice_size, ptr_input,
ptr_output);
}
}
template <typename DeviceContext, typename T>
class GraphSendRecvOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* src_index = ctx.Input<Tensor>("Src_index");
auto* dst_index = ctx.Input<Tensor>("Dst_index");
auto index_type = src_index->type();
if (index_type == framework::proto::VarType::INT32) {
GraphSendRecvOpCUDAKernelLaunchHelper<DeviceContext, T, int>(
ctx, *src_index, *dst_index);
} else if (index_type == framework::proto::VarType::INT64) {
GraphSendRecvOpCUDAKernelLaunchHelper<DeviceContext, T, int64_t>(
ctx, *src_index, *dst_index);
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
"Unsupported Src_index or Dst_index dtype, expected int, int64, but "
"got %s.",
index_type));
}
}
};
template <typename DeviceContext, typename T>
class GraphSendRecvGradOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* src_index = ctx.Input<Tensor>("Dst_index");
auto* dst_index = ctx.Input<Tensor>("Src_index");
auto index_type = src_index->type();
if (index_type == framework::proto::VarType::INT32) {
GraphSendRecvGradOpCUDAKernelLaunchHelper<DeviceContext, T, int>(
ctx, *src_index, *dst_index);
} else if (index_type == framework::proto::VarType::INT64) {
GraphSendRecvGradOpCUDAKernelLaunchHelper<DeviceContext, T, int64_t>(
ctx, *src_index, *dst_index);
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
"Unsupported Src_index or Dst_index dtype, expected int, int64, but "
"got %s.",
index_type));
}
}
};
} // namespace operators
} // namespace paddle
using CUDA = paddle::platform::CUDADeviceContext;
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(graph_send_recv,
ops::GraphSendRecvOpCUDAKernel<CUDA, float>,
ops::GraphSendRecvOpCUDAKernel<CUDA, double>,
ops::GraphSendRecvOpCUDAKernel<CUDA, int>,
ops::GraphSendRecvOpCUDAKernel<CUDA, int64_t>);
REGISTER_OP_CUDA_KERNEL(graph_send_recv_grad,
ops::GraphSendRecvGradOpCUDAKernel<CUDA, float>,
ops::GraphSendRecvGradOpCUDAKernel<CUDA, double>,
ops::GraphSendRecvGradOpCUDAKernel<CUDA, int>,
ops::GraphSendRecvGradOpCUDAKernel<CUDA, int64_t>);
|
176fa23fb60c8fa97906829bafbd715a11719743.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../parquet.h"
#include "../parquet_gpu.h"
#include "parquet_reader_impl.hpp"
#include <io/comp/gpuinflate.h>
#include <algorithm>
#include <array>
#include <rmm/device_buffer.hpp>
#include <rmm/thrust_rmm_allocator.h>
namespace cudf {
namespace io {
namespace parquet {
#if 0
#define LOG_PRINTF(...) std::printf(__VA_ARGS__)
#else
#define LOG_PRINTF(...) (void)0
#endif
/**
* @brief Function that translates cuDF time unit to Parquet clock frequency
**/
constexpr int32_t to_clockrate(gdf_time_unit time_unit) {
switch (time_unit) {
case TIME_UNIT_s:
return 1;
case TIME_UNIT_ms:
return 1000;
case TIME_UNIT_us:
return 1000000;
case TIME_UNIT_ns:
return 1000000000;
default:
return 0;
}
}
/**
* @brief Function that translates Parquet datatype to GDF dtype
**/
constexpr std::pair<gdf_dtype, gdf_dtype_extra_info> to_dtype(
parquet::Type physical, parquet::ConvertedType logical,
bool strings_to_categorical, gdf_time_unit ts_unit, int32_t decimal_scale) {
// Logical type used for actual data interpretation; the legacy converted type
// is superseded by 'logical' type whenever available.
switch (logical) {
case parquet::UINT_8:
case parquet::INT_8:
return std::make_pair(GDF_INT8, gdf_dtype_extra_info{TIME_UNIT_NONE});
case parquet::UINT_16:
case parquet::INT_16:
return std::make_pair(GDF_INT16, gdf_dtype_extra_info{TIME_UNIT_NONE});
case parquet::DATE:
return std::make_pair(GDF_DATE32, gdf_dtype_extra_info{TIME_UNIT_NONE});
case parquet::TIMESTAMP_MICROS:
return (ts_unit != TIME_UNIT_NONE)
? std::make_pair(GDF_TIMESTAMP, gdf_dtype_extra_info{ts_unit})
: std::make_pair(GDF_TIMESTAMP,
gdf_dtype_extra_info{TIME_UNIT_us});
case parquet::TIMESTAMP_MILLIS:
return (ts_unit != TIME_UNIT_NONE)
? std::make_pair(GDF_TIMESTAMP, gdf_dtype_extra_info{ts_unit})
: std::make_pair(GDF_TIMESTAMP,
gdf_dtype_extra_info{TIME_UNIT_ms});
case parquet::DECIMAL:
if (decimal_scale != 0 || (physical != parquet::INT32 && physical != parquet::INT64)) {
return std::make_pair(GDF_FLOAT64, gdf_dtype_extra_info{TIME_UNIT_NONE});
}
break;
default:
break;
}
// Physical storage type supported by Parquet; controls the on-disk storage
// format in combination with the encoding type.
switch (physical) {
case parquet::BOOLEAN:
return std::make_pair(GDF_BOOL8, gdf_dtype_extra_info{TIME_UNIT_NONE});
case parquet::INT32:
return std::make_pair(GDF_INT32, gdf_dtype_extra_info{TIME_UNIT_NONE});
case parquet::INT64:
return std::make_pair(GDF_INT64, gdf_dtype_extra_info{TIME_UNIT_NONE});
case parquet::FLOAT:
return std::make_pair(GDF_FLOAT32, gdf_dtype_extra_info{TIME_UNIT_NONE});
case parquet::DOUBLE:
return std::make_pair(GDF_FLOAT64, gdf_dtype_extra_info{TIME_UNIT_NONE});
case parquet::BYTE_ARRAY:
case parquet::FIXED_LEN_BYTE_ARRAY:
// Can be mapped to GDF_CATEGORY (32-bit hash) or GDF_STRING (nvstring)
return std::make_pair(strings_to_categorical ? GDF_CATEGORY : GDF_STRING,
gdf_dtype_extra_info{TIME_UNIT_NONE});
case parquet::INT96:
return (ts_unit != TIME_UNIT_NONE)
? std::make_pair(GDF_TIMESTAMP, gdf_dtype_extra_info{ts_unit})
: std::make_pair(GDF_TIMESTAMP,
gdf_dtype_extra_info{TIME_UNIT_ns});
default:
break;
}
return std::make_pair(GDF_invalid, gdf_dtype_extra_info{TIME_UNIT_NONE});
}
/**
* @brief Helper that returns the required the number of bits to store a value
**/
template <typename T = uint8_t>
T required_bits(uint32_t max_level) {
return static_cast<T>(
parquet::CompactProtocolReader::NumRequiredBits(max_level));
}
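// For instance, assuming NumRequiredBits(x) returns the bit width needed to
// represent the value x, a column with max_definition_level == 3 needs
// required_bits(3) == 2 bits per definition-level value.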
/**
* @brief A helper wrapper for Parquet file metadata. Provides some additional
* convenience methods for initializing and accessing the metadata and schema
**/
struct ParquetMetadata : public parquet::FileMetaData {
explicit ParquetMetadata(datasource *source) {
constexpr auto header_len = sizeof(parquet::file_header_s);
constexpr auto ender_len = sizeof(parquet::file_ender_s);
const auto len = source->size();
const auto header_buffer = source->get_buffer(0, header_len);
const auto header = (const parquet::file_header_s *)header_buffer->data();
const auto ender_buffer = source->get_buffer(len - ender_len, ender_len);
const auto ender = (const parquet::file_ender_s *)ender_buffer->data();
CUDF_EXPECTS(len > header_len + ender_len, "Incorrect data source");
CUDF_EXPECTS(
header->magic == PARQUET_MAGIC && ender->magic == PARQUET_MAGIC,
"Corrupted header or footer");
CUDF_EXPECTS(ender->footer_len != 0 &&
ender->footer_len <= (len - header_len - ender_len),
"Incorrect footer length");
const auto buffer = source->get_buffer(len - ender->footer_len - ender_len, ender->footer_len);
parquet::CompactProtocolReader cp(buffer->data(), ender->footer_len);
CUDF_EXPECTS(cp.read(this), "Cannot parse metadata");
CUDF_EXPECTS(cp.InitSchema(this), "Cannot initialize schema");
print_metadata();
}
inline int get_total_rows() const { return num_rows; }
inline int get_num_row_groups() const { return row_groups.size(); }
inline int get_num_columns() const { return row_groups[0].columns.size(); }
std::string get_column_name(const std::vector<std::string> &path_in_schema) {
std::string s = (path_in_schema.size() > 0) ? path_in_schema[0] : "";
for (size_t i = 1; i < path_in_schema.size(); i++) {
s += "." + path_in_schema[i];
}
return s;
}
std::vector<std::string> get_column_names() {
std::vector<std::string> all_names;
for (const auto &chunk : row_groups[0].columns) {
all_names.emplace_back(get_column_name(chunk.meta_data.path_in_schema));
}
return all_names;
}
/**
* @brief Extracts the column name used for the row indexes in a dataframe
*
* PANDAS adds its own metadata to the key_value section when writing out the
* dataframe to a file to aid in exact reconstruction. The JSON-formatted
* metadata contains the index column(s) and PANDAS-specific datatypes.
*
* @return std::string Name of the index column
**/
std::string get_pandas_index_name() {
auto it =
std::find_if(key_value_metadata.begin(), key_value_metadata.end(),
[](const auto &item) { return item.key == "pandas"; });
if (it != key_value_metadata.end()) {
const auto pos = it->value.find("index_columns");
if (pos != std::string::npos) {
const auto begin = it->value.find('[', pos);
const auto end = it->value.find(']', begin);
if ((end - begin) > 1) {
return it->value.substr(begin + 2, end - begin - 3);
}
}
}
return "";
}
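// Illustration (assumed metadata shape): for a pandas key_value entry whose
// JSON contains "index_columns": ["__index_level_0__"], the substring logic
// above returns __index_level_0__ -- only the first listed index column, with
// the surrounding quotes stripped.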
/**
* @brief Filters and reduces down to a selection of row groups
*
* @param[in] row_group Index of the row group to select
* @param[in,out] row_start Starting row of the selection
* @param[in,out] row_count Total number of rows selected
*
* @return List of row group indexes and its starting row
**/
auto select_row_groups(int row_group, int &row_start, int &row_count) {
std::vector<std::pair<int, int>> selection;
if (row_group != -1) {
CUDF_EXPECTS(row_group < get_num_row_groups(), "Non-existent row group");
for (int i = 0; i < row_group; ++i) {
row_start += row_groups[i].num_rows;
}
selection.emplace_back(row_group, row_start);
row_count = row_groups[row_group].num_rows;
} else {
row_start = ::max(row_start, 0);
if (row_count == -1) {
row_count = get_total_rows();
}
CUDF_EXPECTS(row_count >= 0, "Invalid row count");
CUDF_EXPECTS(row_start <= get_total_rows(), "Invalid row start");
for (int i = 0, count = 0; i < (int)row_groups.size(); ++i) {
count += row_groups[i].num_rows;
if (count > row_start || count == 0) {
selection.emplace_back(i, count - row_groups[i].num_rows);
}
if (count >= (row_start + row_count)) {
break;
}
}
}
return selection;
}
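// Worked example (illustrative): with three row groups of 100 rows each and
// caller-supplied row_start == 150, row_count == 100, the loop selects row
// groups 1 and 2 with starting rows 100 and 200 respectively; the extra rows
// before 150 and after 249 are expected to be skipped later when pages are
// decoded against min_row/total_rows.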
/**
* @brief Filters and reduces down to a selection of columns
*
* @param[in] use_names List of column names to select
* @param[in] include_index Whether to always include the PANDAS index column
* @param[in] pandas_index Name of the PANDAS index column
*
* @return List of column names
**/
auto select_columns(std::vector<std::string> use_names, bool include_index,
const std::string &pandas_index) {
std::vector<std::pair<int, std::string>> selection;
const auto names = get_column_names();
if (use_names.empty()) {
// No columns specified; include all in the dataset
for (const auto &name : names) {
selection.emplace_back(selection.size(), name);
}
} else {
// Load subset of columns; include PANDAS index unless excluded
if (include_index) {
if (std::find(use_names.begin(), use_names.end(), pandas_index) ==
use_names.end()) {
use_names.push_back(pandas_index);
}
}
for (const auto &use_name : use_names) {
for (size_t i = 0; i < names.size(); ++i) {
if (names[i] == use_name) {
selection.emplace_back(i, names[i]);
break;
}
}
}
}
return selection;
}
void print_metadata() const {
LOG_PRINTF("\n[+] Metadata:\n");
LOG_PRINTF(" version = %d\n", version);
LOG_PRINTF(" created_by = \"%s\"\n", created_by.c_str());
LOG_PRINTF(" schema (%zd entries):\n", schema.size());
for (size_t i = 0; i < schema.size(); i++) {
LOG_PRINTF(
" [%zd] type=%d, name=\"%s\", num_children=%d, rep_type=%d, "
"max_def_lvl=%d, max_rep_lvl=%d\n",
i, schema[i].type, schema[i].name.c_str(), schema[i].num_children,
schema[i].repetition_type, schema[i].max_definition_level,
schema[i].max_repetition_level);
}
LOG_PRINTF(" num rows = %zd\n", (size_t)num_rows);
LOG_PRINTF(" num row groups = %zd\n", row_groups.size());
LOG_PRINTF(" num columns = %zd\n", row_groups[0].columns.size());
}
};
size_t reader::Impl::count_page_headers(
const hostdevice_vector<parquet::gpu::ColumnChunkDesc> &chunks) {
size_t total_pages = 0;
CUDA_TRY(hipMemcpyAsync(chunks.device_ptr(), chunks.host_ptr(),
chunks.memory_size(), hipMemcpyHostToDevice));
CUDA_TRY(parquet::gpu::DecodePageHeaders(chunks.device_ptr(), chunks.size()));
CUDA_TRY(hipMemcpyAsync(chunks.host_ptr(), chunks.device_ptr(),
chunks.memory_size(), hipMemcpyDeviceToHost));
CUDA_TRY(hipStreamSynchronize(0));
LOG_PRINTF("[+] Chunk Information\n");
for (size_t c = 0; c < chunks.size(); c++) {
LOG_PRINTF(
" %2zd: comp_data=%ld, comp_size=%zd, num_values=%zd\n "
"start_row=%zd num_rows=%d max_def_level=%d max_rep_level=%d\n "
"data_type=%d def_level_bits=%d rep_level_bits=%d\n "
"num_data_pages=%d num_dict_pages=%d max_num_pages=%d\n",
c, (uint64_t)chunks[c].compressed_data, chunks[c].compressed_size,
chunks[c].num_values, chunks[c].start_row, chunks[c].num_rows,
chunks[c].max_def_level, chunks[c].max_rep_level, chunks[c].data_type,
chunks[c].def_level_bits, chunks[c].rep_level_bits,
chunks[c].num_data_pages, chunks[c].num_dict_pages,
chunks[c].max_num_pages);
total_pages += chunks[c].num_data_pages + chunks[c].num_dict_pages;
}
return total_pages;
}
void reader::Impl::decode_page_headers(
const hostdevice_vector<parquet::gpu::ColumnChunkDesc> &chunks,
const hostdevice_vector<parquet::gpu::PageInfo> &pages) {
for (size_t c = 0, page_count = 0; c < chunks.size(); c++) {
chunks[c].max_num_pages = chunks[c].num_data_pages + chunks[c].num_dict_pages;
chunks[c].page_info = pages.device_ptr(page_count);
page_count += chunks[c].max_num_pages;
}
CUDA_TRY(hipMemcpyAsync(chunks.device_ptr(), chunks.host_ptr(),
chunks.memory_size(), hipMemcpyHostToDevice));
CUDA_TRY(parquet::gpu::DecodePageHeaders(chunks.device_ptr(), chunks.size()));
CUDA_TRY(hipMemcpyAsync(pages.host_ptr(), pages.device_ptr(),
pages.memory_size(), hipMemcpyDeviceToHost));
CUDA_TRY(hipStreamSynchronize(0));
LOG_PRINTF("[+] Page Header Information\n");
for (size_t i = 0; i < pages.size(); i++) {
LOG_PRINTF(
" %2zd: comp_size=%d, uncomp_size=%d, num_values=%d, chunk_row=%d, "
"num_rows=%d\n chunk_idx=%d, flags=%d, encoding=%d, def_level=%d "
"rep_level=%d, valid_count=%d\n",
i, pages[i].compressed_page_size, pages[i].uncompressed_page_size,
pages[i].num_values, pages[i].chunk_row, pages[i].num_rows,
pages[i].chunk_idx, pages[i].flags, pages[i].encoding,
pages[i].definition_level_encoding, pages[i].repetition_level_encoding,
pages[i].valid_count);
}
}
rmm::device_buffer reader::Impl::decompress_page_data(
const hostdevice_vector<parquet::gpu::ColumnChunkDesc> &chunks,
const hostdevice_vector<parquet::gpu::PageInfo> &pages) {
auto for_each_codec_page = [&](parquet::Compression codec,
const std::function<void(size_t)> &f) {
for (size_t c = 0, page_count = 0; c < chunks.size(); c++) {
const auto page_stride = chunks[c].max_num_pages;
if (chunks[c].codec == codec) {
for (int k = 0; k < page_stride; k++) {
f(page_count + k);
}
}
page_count += page_stride;
}
};
// Brotli scratch memory for decompressing
rmm::device_vector<uint8_t> debrotli_scratch;
// Count the exact number of compressed pages
size_t num_compressed_pages = 0;
size_t total_decompressed_size = 0;
std::array<std::pair<parquet::Compression, size_t>, 3> codecs{
std::make_pair(parquet::GZIP, 0), std::make_pair(parquet::SNAPPY, 0),
std::make_pair(parquet::BROTLI, 0)};
for (auto &codec : codecs) {
for_each_codec_page(codec.first, [&](size_t page) {
total_decompressed_size += pages[page].uncompressed_page_size;
codec.second++;
num_compressed_pages++;
});
if (codec.first == parquet::BROTLI && codec.second > 0) {
debrotli_scratch.resize(get_gpu_debrotli_scratch_size(codec.second));
}
}
LOG_PRINTF(
"[+] Compression\n Total compressed size: %zd\n Number of "
"compressed pages: %zd\n gzip: %zd \n snappy: %zd\n",
total_decompressed_size, num_compressed_pages, codecs[0].second,
codecs[1].second);
// Dispatch batches of pages to decompress for each codec
rmm::device_buffer decomp_pages(align_size(total_decompressed_size));
hostdevice_vector<gpu_inflate_input_s> inflate_in(0, num_compressed_pages);
hostdevice_vector<gpu_inflate_status_s> inflate_out(0, num_compressed_pages);
size_t decomp_offset = 0;
int32_t argc = 0;
for (const auto &codec : codecs) {
if (codec.second > 0) {
int32_t start_pos = argc;
for_each_codec_page(codec.first, [&](size_t page) {
auto dst_base = static_cast<uint8_t *>(decomp_pages.data());
inflate_in[argc].srcDevice = pages[page].page_data;
inflate_in[argc].srcSize = pages[page].compressed_page_size;
inflate_in[argc].dstDevice = dst_base + decomp_offset;
inflate_in[argc].dstSize = pages[page].uncompressed_page_size;
inflate_out[argc].bytes_written = 0;
inflate_out[argc].status = static_cast<uint32_t>(-1000);
inflate_out[argc].reserved = 0;
pages[page].page_data = (uint8_t *)inflate_in[argc].dstDevice;
decomp_offset += inflate_in[argc].dstSize;
argc++;
});
CUDA_TRY(hipMemcpyAsync(
inflate_in.device_ptr(start_pos), inflate_in.host_ptr(start_pos),
sizeof(decltype(inflate_in)::value_type) * (argc - start_pos),
hipMemcpyHostToDevice));
CUDA_TRY(hipMemcpyAsync(
inflate_out.device_ptr(start_pos),
inflate_out.host_ptr(start_pos),
sizeof(decltype(inflate_out)::value_type) * (argc - start_pos),
hipMemcpyHostToDevice));
switch (codec.first) {
case parquet::GZIP:
CUDA_TRY(gpuinflate(inflate_in.device_ptr(start_pos),
inflate_out.device_ptr(start_pos),
argc - start_pos, 1))
break;
case parquet::SNAPPY:
CUDA_TRY(gpu_unsnap(inflate_in.device_ptr(start_pos),
inflate_out.device_ptr(start_pos),
argc - start_pos));
break;
case parquet::BROTLI:
CUDA_TRY(gpu_debrotli(inflate_in.device_ptr(start_pos),
inflate_out.device_ptr(start_pos),
debrotli_scratch.data().get(),
debrotli_scratch.size(), argc - start_pos));
break;
default:
CUDF_EXPECTS(false, "Unexpected decompression dispatch");
break;
}
CUDA_TRY(hipMemcpyAsync(
inflate_out.host_ptr(start_pos),
inflate_out.device_ptr(start_pos),
sizeof(decltype(inflate_out)::value_type) * (argc - start_pos),
hipMemcpyDeviceToHost));
}
}
CUDA_TRY(hipStreamSynchronize(0));
// Update the page information in device memory with the updated value of
// page_data; it now points to the uncompressed data buffer
CUDA_TRY(hipMemcpyAsync(pages.device_ptr(), pages.host_ptr(),
pages.memory_size(), hipMemcpyHostToDevice));
return decomp_pages;
}
void reader::Impl::decode_page_data(
const hostdevice_vector<parquet::gpu::ColumnChunkDesc> &chunks,
const hostdevice_vector<parquet::gpu::PageInfo> &pages,
const std::vector<gdf_column *> &chunk_map, size_t min_row,
size_t total_rows) {
auto is_dict_chunk = [](const parquet::gpu::ColumnChunkDesc &chunk) {
return (chunk.data_type & 0x7) == parquet::BYTE_ARRAY &&
chunk.num_dict_pages > 0;
};
// Count the number of string dictionary entries
// NOTE: Assumes first page in the chunk is always the dictionary page
size_t total_str_dict_indexes = 0;
for (size_t c = 0, page_count = 0; c < chunks.size(); c++) {
if (is_dict_chunk(chunks[c])) {
total_str_dict_indexes += pages[page_count].num_values;
}
page_count += chunks[c].max_num_pages;
}
// Build index for string dictionaries since they can't be indexed
// directly due to variable-sized elements
rmm::device_vector<parquet::gpu::nvstrdesc_s> str_dict_index;
if (total_str_dict_indexes > 0) {
str_dict_index.resize(total_str_dict_indexes);
}
// Update chunks with pointers to column data
for (size_t c = 0, page_count = 0, str_ofs = 0; c < chunks.size(); c++) {
if (is_dict_chunk(chunks[c])) {
chunks[c].str_dict_index = str_dict_index.data().get() + str_ofs;
str_ofs += pages[page_count].num_values;
}
chunks[c].valid_map_base = (uint32_t *)chunk_map[c]->valid;
chunks[c].column_data_base = chunk_map[c]->data;
page_count += chunks[c].max_num_pages;
}
CUDA_TRY(hipMemcpyAsync(chunks.device_ptr(), chunks.host_ptr(),
chunks.memory_size(), hipMemcpyHostToDevice));
if (total_str_dict_indexes > 0) {
CUDA_TRY(BuildStringDictionaryIndex(chunks.device_ptr(), chunks.size()));
}
CUDA_TRY(DecodePageData(pages.device_ptr(), pages.size(), chunks.device_ptr(),
chunks.size(), total_rows, min_row));
CUDA_TRY(hipMemcpyAsync(pages.host_ptr(), pages.device_ptr(),
pages.memory_size(), hipMemcpyDeviceToHost));
CUDA_TRY(hipStreamSynchronize(0));
LOG_PRINTF("[+] Page Data Information\n");
for (size_t i = 0; i < pages.size(); i++) {
if (pages[i].num_rows > 0) {
LOG_PRINTF(" %2zd: valid_count=%d/%d\n", i, pages[i].valid_count,
pages[i].num_rows);
const size_t c = pages[i].chunk_idx;
if (c < chunks.size()) {
chunk_map[c]->null_count += pages[i].num_rows - pages[i].valid_count;
}
}
}
}
reader::Impl::Impl(std::unique_ptr<datasource> source,
reader_options const &options)
: source_(std::move(source)) {
// Open and parse the source Parquet dataset metadata
md_ = std::make_unique<ParquetMetadata>(source_.get());
// Store the index column (PANDAS-specific)
pandas_index_col_ = md_->get_pandas_index_name();
// Select only columns required by the options
selected_cols_ = md_->select_columns(
options.columns, options.use_pandas_metadata, pandas_index_col_);
// Override output timestamp resolution if requested
if (options.timestamp_unit != TIME_UNIT_NONE) {
timestamp_unit_ = options.timestamp_unit;
}
// Strings may be returned as either GDF_STRING or GDF_CATEGORY columns
strings_to_categorical_ = options.strings_to_categorical;
}
table reader::Impl::read(int skip_rows, int num_rows, int row_group) {
// Select only row groups required
const auto selected_row_groups =
md_->select_row_groups(row_group, skip_rows, num_rows);
const auto num_columns = selected_cols_.size();
// Return empty table rather than exception if nothing to load
if (selected_row_groups.empty() || selected_cols_.empty()) {
return cudf::table{};
}
// Initialize gdf_columns, but hold off on allocating storage space
LOG_PRINTF("[+] Selected row groups: %d\n", (int)selected_row_groups.size());
LOG_PRINTF("[+] Selected columns: %d\n", (int)num_columns);
LOG_PRINTF("[+] Selected skip_rows: %d num_rows: %d\n", skip_rows, num_rows);
std::vector<gdf_column_wrapper> columns;
for (const auto &col : selected_cols_) {
auto row_group_0 = md_->row_groups[selected_row_groups[0].first];
auto &col_schema = md_->schema[row_group_0.columns[col.first].schema_idx];
auto dtype_info = to_dtype(col_schema.type, col_schema.converted_type,
strings_to_categorical_, timestamp_unit_, col_schema.decimal_scale);
columns.emplace_back(static_cast<cudf::size_type>(num_rows), dtype_info.first,
dtype_info.second, col.second);
LOG_PRINTF(" %2zd: name=%s size=%zd type=%d data=%lx valid=%lx\n",
columns.size() - 1, columns.back()->col_name,
(size_t)columns.back()->size, columns.back()->dtype,
(uint64_t)columns.back()->data, (uint64_t)columns.back()->valid);
}
// Descriptors for all the chunks that make up the selected columns
const auto num_column_chunks = selected_row_groups.size() * num_columns;
hostdevice_vector<parquet::gpu::ColumnChunkDesc> chunks(0, num_column_chunks);
// Association between each column chunk and its gdf_column
std::vector<gdf_column *> chunk_map(num_column_chunks);
// Tracker for eventually deallocating compressed and uncompressed data
std::vector<rmm::device_buffer> page_data(num_column_chunks);
// Initialize column chunk info
LOG_PRINTF("[+] Column Chunk Description\n");
size_t total_decompressed_size = 0;
auto remaining_rows = num_rows;
for (const auto &rg : selected_row_groups) {
const auto row_group = md_->row_groups[rg.first];
const auto row_group_start = rg.second;
const auto row_group_rows = ::min(remaining_rows, (int)row_group.num_rows);
for (size_t i = 0; i < num_columns; ++i) {
auto col = selected_cols_[i];
auto &col_meta = row_group.columns[col.first].meta_data;
auto &col_schema = md_->schema[row_group.columns[col.first].schema_idx];
auto &gdf_column = columns[i];
// Spec requires each row group to contain exactly one chunk for every
// column. If there are too many or too few, continue with best effort
if (col.second != md_->get_column_name(col_meta.path_in_schema)) {
std::cerr << "Detected mismatched column chunk" << std::endl;
continue;
}
if (chunks.size() >= chunks.max_size()) {
std::cerr << "Detected too many column chunks" << std::endl;
continue;
}
int32_t type_width = (col_schema.type == parquet::FIXED_LEN_BYTE_ARRAY)
? col_schema.type_length
: 0;
int32_t ts_clock_rate = 0;
if (gdf_column->dtype == GDF_INT8)
type_width = 1; // I32 -> I8
else if (gdf_column->dtype == GDF_INT16)
type_width = 2; // I32 -> I16
else if (gdf_column->dtype == GDF_CATEGORY)
type_width = 4; // str -> hash32
else if (gdf_column->dtype == GDF_TIMESTAMP)
ts_clock_rate = to_clockrate(timestamp_unit_);
int8_t converted_type = col_schema.converted_type;
if (converted_type == parquet::DECIMAL && gdf_column->dtype != GDF_FLOAT64) {
converted_type = parquet::UNKNOWN; // Not converting to float64
}
uint8_t *d_compdata = nullptr;
if (col_meta.total_compressed_size != 0) {
const auto offset = (col_meta.dictionary_page_offset != 0)
? ::min(col_meta.data_page_offset,
col_meta.dictionary_page_offset)
: col_meta.data_page_offset;
const auto buffer =
source_->get_buffer(offset, col_meta.total_compressed_size);
page_data[chunks.size()] = rmm::device_buffer(buffer->data(), buffer->size());
d_compdata = static_cast<uint8_t *>(page_data[chunks.size()].data());
}
chunks.insert(parquet::gpu::ColumnChunkDesc(
col_meta.total_compressed_size, d_compdata, col_meta.num_values,
col_schema.type, type_width, row_group_start, row_group_rows,
col_schema.max_definition_level, col_schema.max_repetition_level,
required_bits(col_schema.max_definition_level),
required_bits(col_schema.max_repetition_level), col_meta.codec,
converted_type, col_schema.decimal_scale, ts_clock_rate));
LOG_PRINTF(
" %2d: %s start_row=%d, num_rows=%d, codec=%d, "
"num_values=%ld\n total_compressed_size=%ld "
"total_uncompressed_size=%ld\n schema_idx=%d, type=%d, "
"type_width=%d, max_def_level=%d, "
"max_rep_level=%d\n data_page_offset=%zd, index_page_offset=%zd, "
"dict_page_offset=%zd\n",
col.first, col.second.c_str(), row_group_start, row_group_rows,
col_meta.codec, col_meta.num_values, col_meta.total_compressed_size,
col_meta.total_uncompressed_size,
row_group.columns[col.first].schema_idx,
chunks[chunks.size() - 1].data_type, type_width,
col_schema.max_definition_level, col_schema.max_repetition_level,
(size_t)col_meta.data_page_offset, (size_t)col_meta.index_page_offset,
(size_t)col_meta.dictionary_page_offset);
// Map each column chunk to its output gdf_column
chunk_map[chunks.size() - 1] = gdf_column.get();
if (col_meta.codec != parquet::Compression::UNCOMPRESSED) {
total_decompressed_size += col_meta.total_uncompressed_size;
}
}
remaining_rows -= row_group.num_rows;
}
assert(remaining_rows <= 0);
// Allocate output memory and convert Parquet format into cuDF format
const auto total_pages = count_page_headers(chunks);
if (total_pages > 0) {
hostdevice_vector<parquet::gpu::PageInfo> pages(total_pages, total_pages);
rmm::device_buffer decomp_page_data;
decode_page_headers(chunks, pages);
if (total_decompressed_size > 0) {
decomp_page_data = decompress_page_data(chunks, pages);
// Free compressed data
for (size_t c = 0; c < chunks.size(); c++) {
if (chunks[c].codec != parquet::Compression::UNCOMPRESSED) {
page_data[c].resize(0);
page_data[c].shrink_to_fit();
}
}
}
for (auto &column : columns) {
column.allocate();
}
decode_page_data(chunks, pages, chunk_map, skip_rows, num_rows);
// Perform any final column preparation (may reference decoded data)
for (auto &column : columns) {
column.finalize();
}
} else {
// Columns' data's memory is still expected for an empty dataframe
for (auto &column : columns) {
column.allocate();
column.finalize();
}
}
// Transfer ownership to raw pointer output arguments
std::vector<gdf_column *> out_cols(columns.size());
for (size_t i = 0; i < columns.size(); ++i) {
out_cols[i] = columns[i].release();
}
return cudf::table(out_cols.data(), out_cols.size());
}
reader::reader(std::string filepath, reader_options const &options)
: impl_(std::make_unique<Impl>(datasource::create(filepath), options)) {}
reader::reader(const char *buffer, size_t length, reader_options const &options)
: impl_(std::make_unique<Impl>(datasource::create(buffer, length),
options)) {}
reader::reader(std::shared_ptr<arrow::io::RandomAccessFile> file,
reader_options const &options)
: impl_(std::make_unique<Impl>(datasource::create(file), options)) {}
std::string reader::get_index_column() {
return impl_->get_index_column();
}
table reader::read_all() {
return impl_->read(0, -1, -1);
}
table reader::read_rows(size_t skip_rows, size_t num_rows) {
return impl_->read(skip_rows, (num_rows != 0) ? (int)num_rows : -1, -1);
}
table reader::read_row_group(size_t row_group) {
return impl_->read(0, -1, row_group);
}
reader::~reader() = default;
} // namespace parquet
} // namespace io
} // namespace cudf
| 176fa23fb60c8fa97906829bafbd715a11719743.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../parquet.h"
#include "../parquet_gpu.h"
#include "parquet_reader_impl.hpp"
#include <io/comp/gpuinflate.h>
#include <algorithm>
#include <array>
#include <rmm/device_buffer.hpp>
#include <rmm/thrust_rmm_allocator.h>
namespace cudf {
namespace io {
namespace parquet {
#if 0
#define LOG_PRINTF(...) std::printf(__VA_ARGS__)
#else
#define LOG_PRINTF(...) (void)0
#endif
/**
* @brief Function that translates cuDF time unit to Parquet clock frequency
**/
constexpr int32_t to_clockrate(gdf_time_unit time_unit) {
switch (time_unit) {
case TIME_UNIT_s:
return 1;
case TIME_UNIT_ms:
return 1000;
case TIME_UNIT_us:
return 1000000;
case TIME_UNIT_ns:
return 1000000000;
default:
return 0;
}
}
/**
* @brief Function that translates Parquet datatype to GDF dtype
**/
constexpr std::pair<gdf_dtype, gdf_dtype_extra_info> to_dtype(
parquet::Type physical, parquet::ConvertedType logical,
bool strings_to_categorical, gdf_time_unit ts_unit, int32_t decimal_scale) {
// Logical type used for actual data interpretation; the legacy converted type
  // is superseded by 'logical' type whenever available.
switch (logical) {
case parquet::UINT_8:
case parquet::INT_8:
return std::make_pair(GDF_INT8, gdf_dtype_extra_info{TIME_UNIT_NONE});
case parquet::UINT_16:
case parquet::INT_16:
return std::make_pair(GDF_INT16, gdf_dtype_extra_info{TIME_UNIT_NONE});
case parquet::DATE:
return std::make_pair(GDF_DATE32, gdf_dtype_extra_info{TIME_UNIT_NONE});
case parquet::TIMESTAMP_MICROS:
return (ts_unit != TIME_UNIT_NONE)
? std::make_pair(GDF_TIMESTAMP, gdf_dtype_extra_info{ts_unit})
: std::make_pair(GDF_TIMESTAMP,
gdf_dtype_extra_info{TIME_UNIT_us});
case parquet::TIMESTAMP_MILLIS:
return (ts_unit != TIME_UNIT_NONE)
? std::make_pair(GDF_TIMESTAMP, gdf_dtype_extra_info{ts_unit})
: std::make_pair(GDF_TIMESTAMP,
gdf_dtype_extra_info{TIME_UNIT_ms});
case parquet::DECIMAL:
if (decimal_scale != 0 || (physical != parquet::INT32 && physical != parquet::INT64)) {
return std::make_pair(GDF_FLOAT64, gdf_dtype_extra_info{TIME_UNIT_NONE});
}
break;
default:
break;
}
// Physical storage type supported by Parquet; controls the on-disk storage
// format in combination with the encoding type.
switch (physical) {
case parquet::BOOLEAN:
return std::make_pair(GDF_BOOL8, gdf_dtype_extra_info{TIME_UNIT_NONE});
case parquet::INT32:
return std::make_pair(GDF_INT32, gdf_dtype_extra_info{TIME_UNIT_NONE});
case parquet::INT64:
return std::make_pair(GDF_INT64, gdf_dtype_extra_info{TIME_UNIT_NONE});
case parquet::FLOAT:
return std::make_pair(GDF_FLOAT32, gdf_dtype_extra_info{TIME_UNIT_NONE});
case parquet::DOUBLE:
return std::make_pair(GDF_FLOAT64, gdf_dtype_extra_info{TIME_UNIT_NONE});
case parquet::BYTE_ARRAY:
case parquet::FIXED_LEN_BYTE_ARRAY:
// Can be mapped to GDF_CATEGORY (32-bit hash) or GDF_STRING (nvstring)
return std::make_pair(strings_to_categorical ? GDF_CATEGORY : GDF_STRING,
gdf_dtype_extra_info{TIME_UNIT_NONE});
case parquet::INT96:
return (ts_unit != TIME_UNIT_NONE)
? std::make_pair(GDF_TIMESTAMP, gdf_dtype_extra_info{ts_unit})
: std::make_pair(GDF_TIMESTAMP,
gdf_dtype_extra_info{TIME_UNIT_ns});
default:
break;
}
return std::make_pair(GDF_invalid, gdf_dtype_extra_info{TIME_UNIT_NONE});
}
/**
 * @brief Helper that returns the number of bits required to store a value
**/
template <typename T = uint8_t>
T required_bits(uint32_t max_level) {
return static_cast<T>(
parquet::CompactProtocolReader::NumRequiredBits(max_level));
}
/**
* @brief A helper wrapper for Parquet file metadata. Provides some additional
* convenience methods for initializing and accessing the metadata and schema
**/
struct ParquetMetadata : public parquet::FileMetaData {
explicit ParquetMetadata(datasource *source) {
constexpr auto header_len = sizeof(parquet::file_header_s);
constexpr auto ender_len = sizeof(parquet::file_ender_s);
const auto len = source->size();
const auto header_buffer = source->get_buffer(0, header_len);
const auto header = (const parquet::file_header_s *)header_buffer->data();
const auto ender_buffer = source->get_buffer(len - ender_len, ender_len);
const auto ender = (const parquet::file_ender_s *)ender_buffer->data();
CUDF_EXPECTS(len > header_len + ender_len, "Incorrect data source");
CUDF_EXPECTS(
header->magic == PARQUET_MAGIC && ender->magic == PARQUET_MAGIC,
"Corrupted header or footer");
CUDF_EXPECTS(ender->footer_len != 0 &&
ender->footer_len <= (len - header_len - ender_len),
"Incorrect footer length");
const auto buffer = source->get_buffer(len - ender->footer_len - ender_len, ender->footer_len);
parquet::CompactProtocolReader cp(buffer->data(), ender->footer_len);
CUDF_EXPECTS(cp.read(this), "Cannot parse metadata");
CUDF_EXPECTS(cp.InitSchema(this), "Cannot initialize schema");
print_metadata();
}
inline int get_total_rows() const { return num_rows; }
inline int get_num_row_groups() const { return row_groups.size(); }
inline int get_num_columns() const { return row_groups[0].columns.size(); }
std::string get_column_name(const std::vector<std::string> &path_in_schema) {
std::string s = (path_in_schema.size() > 0) ? path_in_schema[0] : "";
for (size_t i = 1; i < path_in_schema.size(); i++) {
s += "." + path_in_schema[i];
}
return s;
}
std::vector<std::string> get_column_names() {
std::vector<std::string> all_names;
for (const auto &chunk : row_groups[0].columns) {
all_names.emplace_back(get_column_name(chunk.meta_data.path_in_schema));
}
return all_names;
}
/**
* @brief Extracts the column name used for the row indexes in a dataframe
*
* PANDAS adds its own metadata to the key_value section when writing out the
* dataframe to a file to aid in exact reconstruction. The JSON-formatted
 * metadata contains the index column(s) and PANDAS-specific datatypes.
*
* @return std::string Name of the index column
**/
std::string get_pandas_index_name() {
auto it =
std::find_if(key_value_metadata.begin(), key_value_metadata.end(),
[](const auto &item) { return item.key == "pandas"; });
if (it != key_value_metadata.end()) {
const auto pos = it->value.find("index_columns");
if (pos != std::string::npos) {
const auto begin = it->value.find('[', pos);
const auto end = it->value.find(']', begin);
if ((end - begin) > 1) {
return it->value.substr(begin + 2, end - begin - 3);
}
}
}
return "";
}
/**
* @brief Filters and reduces down to a selection of row groups
*
* @param[in] row_group Index of the row group to select
* @param[in,out] row_start Starting row of the selection
* @param[in,out] row_count Total number of rows selected
*
* @return List of row group indexes and its starting row
**/
auto select_row_groups(int row_group, int &row_start, int &row_count) {
std::vector<std::pair<int, int>> selection;
if (row_group != -1) {
CUDF_EXPECTS(row_group < get_num_row_groups(), "Non-existent row group");
for (int i = 0; i < row_group; ++i) {
row_start += row_groups[i].num_rows;
}
selection.emplace_back(row_group, row_start);
row_count = row_groups[row_group].num_rows;
} else {
row_start = std::max(row_start, 0);
if (row_count == -1) {
row_count = get_total_rows();
}
CUDF_EXPECTS(row_count >= 0, "Invalid row count");
CUDF_EXPECTS(row_start <= get_total_rows(), "Invalid row start");
for (int i = 0, count = 0; i < (int)row_groups.size(); ++i) {
count += row_groups[i].num_rows;
if (count > row_start || count == 0) {
selection.emplace_back(i, count - row_groups[i].num_rows);
}
if (count >= (row_start + row_count)) {
break;
}
}
}
return selection;
}
/**
* @brief Filters and reduces down to a selection of columns
*
* @param[in] use_names List of column names to select
* @param[in] include_index Whether to always include the PANDAS index column
* @param[in] pandas_index Name of the PANDAS index column
*
* @return List of column names
**/
auto select_columns(std::vector<std::string> use_names, bool include_index,
const std::string &pandas_index) {
std::vector<std::pair<int, std::string>> selection;
const auto names = get_column_names();
if (use_names.empty()) {
// No columns specified; include all in the dataset
for (const auto &name : names) {
selection.emplace_back(selection.size(), name);
}
} else {
// Load subset of columns; include PANDAS index unless excluded
if (include_index) {
if (std::find(use_names.begin(), use_names.end(), pandas_index) ==
use_names.end()) {
use_names.push_back(pandas_index);
}
}
for (const auto &use_name : use_names) {
for (size_t i = 0; i < names.size(); ++i) {
if (names[i] == use_name) {
selection.emplace_back(i, names[i]);
break;
}
}
}
}
return selection;
}
void print_metadata() const {
LOG_PRINTF("\n[+] Metadata:\n");
LOG_PRINTF(" version = %d\n", version);
LOG_PRINTF(" created_by = \"%s\"\n", created_by.c_str());
LOG_PRINTF(" schema (%zd entries):\n", schema.size());
for (size_t i = 0; i < schema.size(); i++) {
LOG_PRINTF(
" [%zd] type=%d, name=\"%s\", num_children=%d, rep_type=%d, "
"max_def_lvl=%d, max_rep_lvl=%d\n",
i, schema[i].type, schema[i].name.c_str(), schema[i].num_children,
schema[i].repetition_type, schema[i].max_definition_level,
schema[i].max_repetition_level);
}
LOG_PRINTF(" num rows = %zd\n", (size_t)num_rows);
LOG_PRINTF(" num row groups = %zd\n", row_groups.size());
LOG_PRINTF(" num columns = %zd\n", row_groups[0].columns.size());
}
};
size_t reader::Impl::count_page_headers(
const hostdevice_vector<parquet::gpu::ColumnChunkDesc> &chunks) {
size_t total_pages = 0;
CUDA_TRY(cudaMemcpyAsync(chunks.device_ptr(), chunks.host_ptr(),
chunks.memory_size(), cudaMemcpyHostToDevice));
CUDA_TRY(parquet::gpu::DecodePageHeaders(chunks.device_ptr(), chunks.size()));
CUDA_TRY(cudaMemcpyAsync(chunks.host_ptr(), chunks.device_ptr(),
chunks.memory_size(), cudaMemcpyDeviceToHost));
CUDA_TRY(cudaStreamSynchronize(0));
LOG_PRINTF("[+] Chunk Information\n");
for (size_t c = 0; c < chunks.size(); c++) {
LOG_PRINTF(
" %2zd: comp_data=%ld, comp_size=%zd, num_values=%zd\n "
"start_row=%zd num_rows=%d max_def_level=%d max_rep_level=%d\n "
"data_type=%d def_level_bits=%d rep_level_bits=%d\n "
"num_data_pages=%d num_dict_pages=%d max_num_pages=%d\n",
c, (uint64_t)chunks[c].compressed_data, chunks[c].compressed_size,
chunks[c].num_values, chunks[c].start_row, chunks[c].num_rows,
chunks[c].max_def_level, chunks[c].max_rep_level, chunks[c].data_type,
chunks[c].def_level_bits, chunks[c].rep_level_bits,
chunks[c].num_data_pages, chunks[c].num_dict_pages,
chunks[c].max_num_pages);
total_pages += chunks[c].num_data_pages + chunks[c].num_dict_pages;
}
return total_pages;
}
void reader::Impl::decode_page_headers(
const hostdevice_vector<parquet::gpu::ColumnChunkDesc> &chunks,
const hostdevice_vector<parquet::gpu::PageInfo> &pages) {
for (size_t c = 0, page_count = 0; c < chunks.size(); c++) {
chunks[c].max_num_pages = chunks[c].num_data_pages + chunks[c].num_dict_pages;
chunks[c].page_info = pages.device_ptr(page_count);
page_count += chunks[c].max_num_pages;
}
CUDA_TRY(cudaMemcpyAsync(chunks.device_ptr(), chunks.host_ptr(),
chunks.memory_size(), cudaMemcpyHostToDevice));
CUDA_TRY(parquet::gpu::DecodePageHeaders(chunks.device_ptr(), chunks.size()));
CUDA_TRY(cudaMemcpyAsync(pages.host_ptr(), pages.device_ptr(),
pages.memory_size(), cudaMemcpyDeviceToHost));
CUDA_TRY(cudaStreamSynchronize(0));
LOG_PRINTF("[+] Page Header Information\n");
for (size_t i = 0; i < pages.size(); i++) {
LOG_PRINTF(
" %2zd: comp_size=%d, uncomp_size=%d, num_values=%d, chunk_row=%d, "
"num_rows=%d\n chunk_idx=%d, flags=%d, encoding=%d, def_level=%d "
"rep_level=%d, valid_count=%d\n",
i, pages[i].compressed_page_size, pages[i].uncompressed_page_size,
pages[i].num_values, pages[i].chunk_row, pages[i].num_rows,
pages[i].chunk_idx, pages[i].flags, pages[i].encoding,
pages[i].definition_level_encoding, pages[i].repetition_level_encoding,
pages[i].valid_count);
}
}
rmm::device_buffer reader::Impl::decompress_page_data(
const hostdevice_vector<parquet::gpu::ColumnChunkDesc> &chunks,
const hostdevice_vector<parquet::gpu::PageInfo> &pages) {
auto for_each_codec_page = [&](parquet::Compression codec,
const std::function<void(size_t)> &f) {
for (size_t c = 0, page_count = 0; c < chunks.size(); c++) {
const auto page_stride = chunks[c].max_num_pages;
if (chunks[c].codec == codec) {
for (int k = 0; k < page_stride; k++) {
f(page_count + k);
}
}
page_count += page_stride;
}
};
// Brotli scratch memory for decompressing
rmm::device_vector<uint8_t> debrotli_scratch;
// Count the exact number of compressed pages
size_t num_compressed_pages = 0;
size_t total_decompressed_size = 0;
std::array<std::pair<parquet::Compression, size_t>, 3> codecs{
std::make_pair(parquet::GZIP, 0), std::make_pair(parquet::SNAPPY, 0),
std::make_pair(parquet::BROTLI, 0)};
for (auto &codec : codecs) {
for_each_codec_page(codec.first, [&](size_t page) {
total_decompressed_size += pages[page].uncompressed_page_size;
codec.second++;
num_compressed_pages++;
});
if (codec.first == parquet::BROTLI && codec.second > 0) {
debrotli_scratch.resize(get_gpu_debrotli_scratch_size(codec.second));
}
}
LOG_PRINTF(
"[+] Compression\n Total compressed size: %zd\n Number of "
"compressed pages: %zd\n gzip: %zd \n snappy: %zd\n",
total_decompressed_size, num_compressed_pages, codecs[0].second,
codecs[1].second);
// Dispatch batches of pages to decompress for each codec
rmm::device_buffer decomp_pages(align_size(total_decompressed_size));
hostdevice_vector<gpu_inflate_input_s> inflate_in(0, num_compressed_pages);
hostdevice_vector<gpu_inflate_status_s> inflate_out(0, num_compressed_pages);
size_t decomp_offset = 0;
int32_t argc = 0;
for (const auto &codec : codecs) {
if (codec.second > 0) {
int32_t start_pos = argc;
for_each_codec_page(codec.first, [&](size_t page) {
auto dst_base = static_cast<uint8_t *>(decomp_pages.data());
inflate_in[argc].srcDevice = pages[page].page_data;
inflate_in[argc].srcSize = pages[page].compressed_page_size;
inflate_in[argc].dstDevice = dst_base + decomp_offset;
inflate_in[argc].dstSize = pages[page].uncompressed_page_size;
inflate_out[argc].bytes_written = 0;
inflate_out[argc].status = static_cast<uint32_t>(-1000);
inflate_out[argc].reserved = 0;
pages[page].page_data = (uint8_t *)inflate_in[argc].dstDevice;
decomp_offset += inflate_in[argc].dstSize;
argc++;
});
CUDA_TRY(cudaMemcpyAsync(
inflate_in.device_ptr(start_pos), inflate_in.host_ptr(start_pos),
sizeof(decltype(inflate_in)::value_type) * (argc - start_pos),
cudaMemcpyHostToDevice));
CUDA_TRY(cudaMemcpyAsync(
inflate_out.device_ptr(start_pos),
inflate_out.host_ptr(start_pos),
sizeof(decltype(inflate_out)::value_type) * (argc - start_pos),
cudaMemcpyHostToDevice));
switch (codec.first) {
case parquet::GZIP:
CUDA_TRY(gpuinflate(inflate_in.device_ptr(start_pos),
inflate_out.device_ptr(start_pos),
                            argc - start_pos, 1));
break;
case parquet::SNAPPY:
CUDA_TRY(gpu_unsnap(inflate_in.device_ptr(start_pos),
inflate_out.device_ptr(start_pos),
argc - start_pos));
break;
case parquet::BROTLI:
CUDA_TRY(gpu_debrotli(inflate_in.device_ptr(start_pos),
inflate_out.device_ptr(start_pos),
debrotli_scratch.data().get(),
debrotli_scratch.size(), argc - start_pos));
break;
default:
CUDF_EXPECTS(false, "Unexpected decompression dispatch");
break;
}
CUDA_TRY(cudaMemcpyAsync(
inflate_out.host_ptr(start_pos),
inflate_out.device_ptr(start_pos),
sizeof(decltype(inflate_out)::value_type) * (argc - start_pos),
cudaMemcpyDeviceToHost));
}
}
CUDA_TRY(cudaStreamSynchronize(0));
// Update the page information in device memory with the updated value of
// page_data; it now points to the uncompressed data buffer
CUDA_TRY(cudaMemcpyAsync(pages.device_ptr(), pages.host_ptr(),
pages.memory_size(), cudaMemcpyHostToDevice));
return decomp_pages;
}
void reader::Impl::decode_page_data(
const hostdevice_vector<parquet::gpu::ColumnChunkDesc> &chunks,
const hostdevice_vector<parquet::gpu::PageInfo> &pages,
const std::vector<gdf_column *> &chunk_map, size_t min_row,
size_t total_rows) {
auto is_dict_chunk = [](const parquet::gpu::ColumnChunkDesc &chunk) {
return (chunk.data_type & 0x7) == parquet::BYTE_ARRAY &&
chunk.num_dict_pages > 0;
};
// Count the number of string dictionary entries
// NOTE: Assumes first page in the chunk is always the dictionary page
size_t total_str_dict_indexes = 0;
for (size_t c = 0, page_count = 0; c < chunks.size(); c++) {
if (is_dict_chunk(chunks[c])) {
total_str_dict_indexes += pages[page_count].num_values;
}
page_count += chunks[c].max_num_pages;
}
// Build index for string dictionaries since they can't be indexed
// directly due to variable-sized elements
rmm::device_vector<parquet::gpu::nvstrdesc_s> str_dict_index;
if (total_str_dict_indexes > 0) {
str_dict_index.resize(total_str_dict_indexes);
}
// Update chunks with pointers to column data
for (size_t c = 0, page_count = 0, str_ofs = 0; c < chunks.size(); c++) {
if (is_dict_chunk(chunks[c])) {
chunks[c].str_dict_index = str_dict_index.data().get() + str_ofs;
str_ofs += pages[page_count].num_values;
}
chunks[c].valid_map_base = (uint32_t *)chunk_map[c]->valid;
chunks[c].column_data_base = chunk_map[c]->data;
page_count += chunks[c].max_num_pages;
}
CUDA_TRY(cudaMemcpyAsync(chunks.device_ptr(), chunks.host_ptr(),
chunks.memory_size(), cudaMemcpyHostToDevice));
if (total_str_dict_indexes > 0) {
CUDA_TRY(BuildStringDictionaryIndex(chunks.device_ptr(), chunks.size()));
}
CUDA_TRY(DecodePageData(pages.device_ptr(), pages.size(), chunks.device_ptr(),
chunks.size(), total_rows, min_row));
CUDA_TRY(cudaMemcpyAsync(pages.host_ptr(), pages.device_ptr(),
pages.memory_size(), cudaMemcpyDeviceToHost));
CUDA_TRY(cudaStreamSynchronize(0));
LOG_PRINTF("[+] Page Data Information\n");
for (size_t i = 0; i < pages.size(); i++) {
if (pages[i].num_rows > 0) {
LOG_PRINTF(" %2zd: valid_count=%d/%d\n", i, pages[i].valid_count,
pages[i].num_rows);
const size_t c = pages[i].chunk_idx;
if (c < chunks.size()) {
chunk_map[c]->null_count += pages[i].num_rows - pages[i].valid_count;
}
}
}
}
reader::Impl::Impl(std::unique_ptr<datasource> source,
reader_options const &options)
: source_(std::move(source)) {
// Open and parse the source Parquet dataset metadata
md_ = std::make_unique<ParquetMetadata>(source_.get());
// Store the index column (PANDAS-specific)
pandas_index_col_ = md_->get_pandas_index_name();
// Select only columns required by the options
selected_cols_ = md_->select_columns(
options.columns, options.use_pandas_metadata, pandas_index_col_);
// Override output timestamp resolution if requested
if (options.timestamp_unit != TIME_UNIT_NONE) {
timestamp_unit_ = options.timestamp_unit;
}
// Strings may be returned as either GDF_STRING or GDF_CATEGORY columns
strings_to_categorical_ = options.strings_to_categorical;
}
table reader::Impl::read(int skip_rows, int num_rows, int row_group) {
// Select only row groups required
const auto selected_row_groups =
md_->select_row_groups(row_group, skip_rows, num_rows);
const auto num_columns = selected_cols_.size();
// Return empty table rather than exception if nothing to load
if (selected_row_groups.empty() || selected_cols_.empty()) {
return cudf::table{};
}
// Initialize gdf_columns, but hold off on allocating storage space
LOG_PRINTF("[+] Selected row groups: %d\n", (int)selected_row_groups.size());
LOG_PRINTF("[+] Selected columns: %d\n", (int)num_columns);
LOG_PRINTF("[+] Selected skip_rows: %d num_rows: %d\n", skip_rows, num_rows);
std::vector<gdf_column_wrapper> columns;
for (const auto &col : selected_cols_) {
auto row_group_0 = md_->row_groups[selected_row_groups[0].first];
auto &col_schema = md_->schema[row_group_0.columns[col.first].schema_idx];
auto dtype_info = to_dtype(col_schema.type, col_schema.converted_type,
strings_to_categorical_, timestamp_unit_, col_schema.decimal_scale);
columns.emplace_back(static_cast<cudf::size_type>(num_rows), dtype_info.first,
dtype_info.second, col.second);
LOG_PRINTF(" %2zd: name=%s size=%zd type=%d data=%lx valid=%lx\n",
columns.size() - 1, columns.back()->col_name,
(size_t)columns.back()->size, columns.back()->dtype,
(uint64_t)columns.back()->data, (uint64_t)columns.back()->valid);
}
// Descriptors for all the chunks that make up the selected columns
const auto num_column_chunks = selected_row_groups.size() * num_columns;
hostdevice_vector<parquet::gpu::ColumnChunkDesc> chunks(0, num_column_chunks);
// Association between each column chunk and its gdf_column
std::vector<gdf_column *> chunk_map(num_column_chunks);
// Tracker for eventually deallocating compressed and uncompressed data
std::vector<rmm::device_buffer> page_data(num_column_chunks);
// Initialize column chunk info
LOG_PRINTF("[+] Column Chunk Description\n");
size_t total_decompressed_size = 0;
auto remaining_rows = num_rows;
for (const auto &rg : selected_row_groups) {
const auto row_group = md_->row_groups[rg.first];
const auto row_group_start = rg.second;
const auto row_group_rows = std::min(remaining_rows, (int)row_group.num_rows);
for (size_t i = 0; i < num_columns; ++i) {
auto col = selected_cols_[i];
auto &col_meta = row_group.columns[col.first].meta_data;
auto &col_schema = md_->schema[row_group.columns[col.first].schema_idx];
auto &gdf_column = columns[i];
// Spec requires each row group to contain exactly one chunk for every
// column. If there are too many or too few, continue with best effort
if (col.second != md_->get_column_name(col_meta.path_in_schema)) {
std::cerr << "Detected mismatched column chunk" << std::endl;
continue;
}
if (chunks.size() >= chunks.max_size()) {
std::cerr << "Detected too many column chunks" << std::endl;
continue;
}
int32_t type_width = (col_schema.type == parquet::FIXED_LEN_BYTE_ARRAY)
? col_schema.type_length
: 0;
int32_t ts_clock_rate = 0;
if (gdf_column->dtype == GDF_INT8)
type_width = 1; // I32 -> I8
else if (gdf_column->dtype == GDF_INT16)
type_width = 2; // I32 -> I16
else if (gdf_column->dtype == GDF_CATEGORY)
type_width = 4; // str -> hash32
else if (gdf_column->dtype == GDF_TIMESTAMP)
ts_clock_rate = to_clockrate(timestamp_unit_);
int8_t converted_type = col_schema.converted_type;
if (converted_type == parquet::DECIMAL && gdf_column->dtype != GDF_FLOAT64) {
converted_type = parquet::UNKNOWN; // Not converting to float64
}
uint8_t *d_compdata = nullptr;
if (col_meta.total_compressed_size != 0) {
const auto offset = (col_meta.dictionary_page_offset != 0)
? std::min(col_meta.data_page_offset,
col_meta.dictionary_page_offset)
: col_meta.data_page_offset;
const auto buffer =
source_->get_buffer(offset, col_meta.total_compressed_size);
page_data[chunks.size()] = rmm::device_buffer(buffer->data(), buffer->size());
d_compdata = static_cast<uint8_t *>(page_data[chunks.size()].data());
}
chunks.insert(parquet::gpu::ColumnChunkDesc(
col_meta.total_compressed_size, d_compdata, col_meta.num_values,
col_schema.type, type_width, row_group_start, row_group_rows,
col_schema.max_definition_level, col_schema.max_repetition_level,
required_bits(col_schema.max_definition_level),
required_bits(col_schema.max_repetition_level), col_meta.codec,
converted_type, col_schema.decimal_scale, ts_clock_rate));
LOG_PRINTF(
" %2d: %s start_row=%d, num_rows=%d, codec=%d, "
"num_values=%ld\n total_compressed_size=%ld "
"total_uncompressed_size=%ld\n schema_idx=%d, type=%d, "
"type_width=%d, max_def_level=%d, "
"max_rep_level=%d\n data_page_offset=%zd, index_page_offset=%zd, "
"dict_page_offset=%zd\n",
col.first, col.second.c_str(), row_group_start, row_group_rows,
col_meta.codec, col_meta.num_values, col_meta.total_compressed_size,
col_meta.total_uncompressed_size,
row_group.columns[col.first].schema_idx,
chunks[chunks.size() - 1].data_type, type_width,
col_schema.max_definition_level, col_schema.max_repetition_level,
(size_t)col_meta.data_page_offset, (size_t)col_meta.index_page_offset,
(size_t)col_meta.dictionary_page_offset);
// Map each column chunk to its output gdf_column
chunk_map[chunks.size() - 1] = gdf_column.get();
if (col_meta.codec != parquet::Compression::UNCOMPRESSED) {
total_decompressed_size += col_meta.total_uncompressed_size;
}
}
remaining_rows -= row_group.num_rows;
}
assert(remaining_rows <= 0);
// Allocate output memory and convert Parquet format into cuDF format
const auto total_pages = count_page_headers(chunks);
if (total_pages > 0) {
hostdevice_vector<parquet::gpu::PageInfo> pages(total_pages, total_pages);
rmm::device_buffer decomp_page_data;
decode_page_headers(chunks, pages);
if (total_decompressed_size > 0) {
decomp_page_data = decompress_page_data(chunks, pages);
// Free compressed data
for (size_t c = 0; c < chunks.size(); c++) {
if (chunks[c].codec != parquet::Compression::UNCOMPRESSED) {
page_data[c].resize(0);
page_data[c].shrink_to_fit();
}
}
}
for (auto &column : columns) {
column.allocate();
}
decode_page_data(chunks, pages, chunk_map, skip_rows, num_rows);
// Perform any final column preparation (may reference decoded data)
for (auto &column : columns) {
column.finalize();
}
} else {
// Columns' data's memory is still expected for an empty dataframe
for (auto &column : columns) {
column.allocate();
column.finalize();
}
}
// Transfer ownership to raw pointer output arguments
std::vector<gdf_column *> out_cols(columns.size());
for (size_t i = 0; i < columns.size(); ++i) {
out_cols[i] = columns[i].release();
}
return cudf::table(out_cols.data(), out_cols.size());
}
reader::reader(std::string filepath, reader_options const &options)
: impl_(std::make_unique<Impl>(datasource::create(filepath), options)) {}
reader::reader(const char *buffer, size_t length, reader_options const &options)
: impl_(std::make_unique<Impl>(datasource::create(buffer, length),
options)) {}
reader::reader(std::shared_ptr<arrow::io::RandomAccessFile> file,
reader_options const &options)
: impl_(std::make_unique<Impl>(datasource::create(file), options)) {}
std::string reader::get_index_column() {
return impl_->get_index_column();
}
table reader::read_all() {
return impl_->read(0, -1, -1);
}
table reader::read_rows(size_t skip_rows, size_t num_rows) {
return impl_->read(skip_rows, (num_rows != 0) ? (int)num_rows : -1, -1);
}
table reader::read_row_group(size_t row_group) {
return impl_->read(0, -1, row_group);
}
reader::~reader() = default;
} // namespace parquet
} // namespace io
} // namespace cudf
|
5e7e369b230e2868e74e71193488edd26e115e98.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2019-2020 XGBoost contributors
*/
#include <xgboost/base.h>
#include <utility>
#include "../helpers.h"
#include "../histogram_helpers.h"
#include "gtest/gtest.h"
#include "../../../src/common/categorical.h"
#include "../../../src/common/hist_util.h"
#include "../../../src/data/ellpack_page.cuh"
namespace xgboost {
TEST(EllpackPage, EmptyDMatrix) {
constexpr int kNRows = 0, kNCols = 0, kMaxBin = 256;
constexpr float kSparsity = 0;
auto dmat = RandomDataGenerator(kNRows, kNCols, kSparsity).GenerateDMatrix();
auto& page = *dmat->GetBatches<EllpackPage>({0, kMaxBin}).begin();
auto impl = page.Impl();
ASSERT_EQ(impl->row_stride, 0);
ASSERT_EQ(impl->Cuts().TotalBins(), 0);
ASSERT_EQ(impl->gidx_buffer.Size(), 4);
}
TEST(EllpackPage, BuildGidxDense) {
int constexpr kNRows = 16, kNCols = 8;
auto page = BuildEllpackPage(kNRows, kNCols);
std::vector<common::CompressedByteT> h_gidx_buffer(page->gidx_buffer.HostVector());
common::CompressedIterator<uint32_t> gidx(h_gidx_buffer.data(), page->NumSymbols());
ASSERT_EQ(page->row_stride, kNCols);
std::vector<uint32_t> solution = {
0, 3, 8, 9, 14, 17, 20, 21,
0, 4, 7, 10, 14, 16, 19, 22,
1, 3, 7, 11, 14, 15, 19, 21,
2, 3, 7, 9, 13, 16, 20, 22,
2, 3, 6, 9, 12, 16, 20, 21,
1, 5, 6, 10, 13, 16, 20, 21,
2, 5, 8, 9, 13, 17, 19, 22,
2, 4, 6, 10, 14, 17, 19, 21,
2, 5, 7, 9, 13, 16, 19, 22,
0, 3, 8, 10, 12, 16, 19, 22,
1, 3, 7, 10, 13, 16, 19, 21,
1, 3, 8, 10, 13, 17, 20, 22,
2, 4, 6, 9, 14, 15, 19, 22,
1, 4, 6, 9, 13, 16, 19, 21,
2, 4, 8, 10, 14, 15, 19, 22,
1, 4, 7, 10, 14, 16, 19, 21,
};
for (size_t i = 0; i < kNRows * kNCols; ++i) {
ASSERT_EQ(solution[i], gidx[i]);
}
}
TEST(EllpackPage, BuildGidxSparse) {
int constexpr kNRows = 16, kNCols = 8;
auto page = BuildEllpackPage(kNRows, kNCols, 0.9f);
std::vector<common::CompressedByteT> h_gidx_buffer(page->gidx_buffer.HostVector());
common::CompressedIterator<uint32_t> gidx(h_gidx_buffer.data(), 25);
ASSERT_LE(page->row_stride, 3);
// row_stride = 3, 16 rows, 48 entries for ELLPack
std::vector<uint32_t> solution = {
15, 24, 24, 0, 24, 24, 24, 24, 24, 24, 24, 24, 20, 24, 24, 24,
24, 24, 24, 24, 24, 5, 24, 24, 0, 16, 24, 15, 24, 24, 24, 24,
24, 7, 14, 16, 4, 24, 24, 24, 24, 24, 9, 24, 24, 1, 24, 24
};
for (size_t i = 0; i < kNRows * page->row_stride; ++i) {
ASSERT_EQ(solution[i], gidx[i]);
}
}
TEST(EllpackPage, FromCategoricalBasic) {
using common::AsCat;
size_t constexpr kRows = 1000, kCats = 13, kCols = 1;
size_t max_bins = 8;
auto x = GenerateRandomCategoricalSingleColumn(kRows, kCats);
auto m = GetDMatrixFromData(x, kRows, 1);
auto& h_ft = m->Info().feature_types.HostVector();
h_ft.resize(kCols, FeatureType::kCategorical);
BatchParam p(0, max_bins);
auto ellpack = EllpackPage(m.get(), p);
auto accessor = ellpack.Impl()->GetDeviceAccessor(0);
ASSERT_EQ(kCats, accessor.NumBins());
auto x_copy = x;
std::sort(x_copy.begin(), x_copy.end());
auto n_uniques = std::unique(x_copy.begin(), x_copy.end()) - x_copy.begin();
ASSERT_EQ(n_uniques, kCats);
std::vector<uint32_t> h_cuts_ptr(accessor.feature_segments.size());
dh::CopyDeviceSpanToVector(&h_cuts_ptr, accessor.feature_segments);
std::vector<float> h_cuts_values(accessor.gidx_fvalue_map.size());
dh::CopyDeviceSpanToVector(&h_cuts_values, accessor.gidx_fvalue_map);
ASSERT_EQ(h_cuts_ptr.size(), 2);
ASSERT_EQ(h_cuts_values.size(), kCats);
std::vector<common::CompressedByteT> const &h_gidx_buffer =
ellpack.Impl()->gidx_buffer.HostVector();
auto h_gidx_iter = common::CompressedIterator<uint32_t>(
h_gidx_buffer.data(), accessor.NumSymbols());
for (size_t i = 0; i < x.size(); ++i) {
auto bin = h_gidx_iter[i];
auto bin_value = h_cuts_values.at(bin);
ASSERT_EQ(AsCat(x[i]), AsCat(bin_value));
}
}
struct ReadRowFunction {
EllpackDeviceAccessor matrix;
int row;
bst_float* row_data_d;
ReadRowFunction(EllpackDeviceAccessor matrix, int row, bst_float* row_data_d)
: matrix(std::move(matrix)), row(row), row_data_d(row_data_d) {}
__device__ void operator()(size_t col) {
auto value = matrix.GetFvalue(row, col);
if (isnan(value)) {
value = -1;
}
row_data_d[col] = value;
}
};
TEST(EllpackPage, Copy) {
constexpr size_t kRows = 1024;
constexpr size_t kCols = 16;
constexpr size_t kPageSize = 1024;
// Create a DMatrix with multiple batches.
dmlc::TemporaryDirectory tmpdir;
std::unique_ptr<DMatrix>
dmat(CreateSparsePageDMatrixWithRC(kRows, kCols, kPageSize, true, tmpdir));
BatchParam param{0, 256, kPageSize};
auto page = (*dmat->GetBatches<EllpackPage>(param).begin()).Impl();
// Create an empty result page.
EllpackPageImpl result(0, page->Cuts(), page->is_dense, page->row_stride,
kRows);
// Copy batch pages into the result page.
size_t offset = 0;
for (auto& batch : dmat->GetBatches<EllpackPage>(param)) {
size_t num_elements = result.Copy(0, batch.Impl(), offset);
offset += num_elements;
}
size_t current_row = 0;
thrust::device_vector<bst_float> row_d(kCols);
thrust::device_vector<bst_float> row_result_d(kCols);
std::vector<bst_float> row(kCols);
std::vector<bst_float> row_result(kCols);
for (auto& page : dmat->GetBatches<EllpackPage>(param)) {
auto impl = page.Impl();
EXPECT_EQ(impl->base_rowid, current_row);
for (size_t i = 0; i < impl->Size(); i++) {
dh::LaunchN(0, kCols, ReadRowFunction(impl->GetDeviceAccessor(0), current_row, row_d.data().get()));
thrust::copy(row_d.begin(), row_d.end(), row.begin());
dh::LaunchN(0, kCols, ReadRowFunction(result.GetDeviceAccessor(0), current_row, row_result_d.data().get()));
thrust::copy(row_result_d.begin(), row_result_d.end(), row_result.begin());
EXPECT_EQ(row, row_result);
current_row++;
}
}
}
TEST(EllpackPage, Compact) {
constexpr size_t kRows = 16;
constexpr size_t kCols = 2;
constexpr size_t kPageSize = 1;
constexpr size_t kCompactedRows = 8;
// Create a DMatrix with multiple batches.
dmlc::TemporaryDirectory tmpdir;
std::unique_ptr<DMatrix>
dmat(CreateSparsePageDMatrixWithRC(kRows, kCols, kPageSize, true, tmpdir));
BatchParam param{0, 256, kPageSize};
auto page = (*dmat->GetBatches<EllpackPage>(param).begin()).Impl();
// Create an empty result page.
EllpackPageImpl result(0, page->Cuts(), page->is_dense, page->row_stride,
kCompactedRows);
// Compact batch pages into the result page.
std::vector<size_t> row_indexes_h {
SIZE_MAX, 0, 1, 2, SIZE_MAX, 3, SIZE_MAX, 4, 5, SIZE_MAX, 6, SIZE_MAX, 7, SIZE_MAX, SIZE_MAX,
SIZE_MAX};
thrust::device_vector<size_t> row_indexes_d = row_indexes_h;
common::Span<size_t> row_indexes_span(row_indexes_d.data().get(), kRows);
for (auto& batch : dmat->GetBatches<EllpackPage>(param)) {
result.Compact(0, batch.Impl(), row_indexes_span);
}
size_t current_row = 0;
thrust::device_vector<bst_float> row_d(kCols);
thrust::device_vector<bst_float> row_result_d(kCols);
std::vector<bst_float> row(kCols);
std::vector<bst_float> row_result(kCols);
for (auto& page : dmat->GetBatches<EllpackPage>(param)) {
auto impl = page.Impl();
EXPECT_EQ(impl->base_rowid, current_row);
for (size_t i = 0; i < impl->Size(); i++) {
size_t compacted_row = row_indexes_h[current_row];
if (compacted_row == SIZE_MAX) {
current_row++;
continue;
}
dh::LaunchN(0, kCols, ReadRowFunction(impl->GetDeviceAccessor(0), current_row, row_d.data().get()));
dh::safe_cuda (hipDeviceSynchronize());
thrust::copy(row_d.begin(), row_d.end(), row.begin());
dh::LaunchN(0, kCols,
ReadRowFunction(result.GetDeviceAccessor(0), compacted_row, row_result_d.data().get()));
thrust::copy(row_result_d.begin(), row_result_d.end(), row_result.begin());
EXPECT_EQ(row, row_result);
current_row++;
}
}
}
} // namespace xgboost
| 5e7e369b230e2868e74e71193488edd26e115e98.cu | /*!
* Copyright 2019-2020 XGBoost contributors
*/
#include <xgboost/base.h>
#include <utility>
#include "../helpers.h"
#include "../histogram_helpers.h"
#include "gtest/gtest.h"
#include "../../../src/common/categorical.h"
#include "../../../src/common/hist_util.h"
#include "../../../src/data/ellpack_page.cuh"
namespace xgboost {
TEST(EllpackPage, EmptyDMatrix) {
constexpr int kNRows = 0, kNCols = 0, kMaxBin = 256;
constexpr float kSparsity = 0;
auto dmat = RandomDataGenerator(kNRows, kNCols, kSparsity).GenerateDMatrix();
auto& page = *dmat->GetBatches<EllpackPage>({0, kMaxBin}).begin();
auto impl = page.Impl();
ASSERT_EQ(impl->row_stride, 0);
ASSERT_EQ(impl->Cuts().TotalBins(), 0);
ASSERT_EQ(impl->gidx_buffer.Size(), 4);
}
TEST(EllpackPage, BuildGidxDense) {
int constexpr kNRows = 16, kNCols = 8;
auto page = BuildEllpackPage(kNRows, kNCols);
std::vector<common::CompressedByteT> h_gidx_buffer(page->gidx_buffer.HostVector());
common::CompressedIterator<uint32_t> gidx(h_gidx_buffer.data(), page->NumSymbols());
ASSERT_EQ(page->row_stride, kNCols);
std::vector<uint32_t> solution = {
0, 3, 8, 9, 14, 17, 20, 21,
0, 4, 7, 10, 14, 16, 19, 22,
1, 3, 7, 11, 14, 15, 19, 21,
2, 3, 7, 9, 13, 16, 20, 22,
2, 3, 6, 9, 12, 16, 20, 21,
1, 5, 6, 10, 13, 16, 20, 21,
2, 5, 8, 9, 13, 17, 19, 22,
2, 4, 6, 10, 14, 17, 19, 21,
2, 5, 7, 9, 13, 16, 19, 22,
0, 3, 8, 10, 12, 16, 19, 22,
1, 3, 7, 10, 13, 16, 19, 21,
1, 3, 8, 10, 13, 17, 20, 22,
2, 4, 6, 9, 14, 15, 19, 22,
1, 4, 6, 9, 13, 16, 19, 21,
2, 4, 8, 10, 14, 15, 19, 22,
1, 4, 7, 10, 14, 16, 19, 21,
};
for (size_t i = 0; i < kNRows * kNCols; ++i) {
ASSERT_EQ(solution[i], gidx[i]);
}
}
TEST(EllpackPage, BuildGidxSparse) {
int constexpr kNRows = 16, kNCols = 8;
auto page = BuildEllpackPage(kNRows, kNCols, 0.9f);
std::vector<common::CompressedByteT> h_gidx_buffer(page->gidx_buffer.HostVector());
common::CompressedIterator<uint32_t> gidx(h_gidx_buffer.data(), 25);
ASSERT_LE(page->row_stride, 3);
// row_stride = 3, 16 rows, 48 entries for ELLPack
std::vector<uint32_t> solution = {
15, 24, 24, 0, 24, 24, 24, 24, 24, 24, 24, 24, 20, 24, 24, 24,
24, 24, 24, 24, 24, 5, 24, 24, 0, 16, 24, 15, 24, 24, 24, 24,
24, 7, 14, 16, 4, 24, 24, 24, 24, 24, 9, 24, 24, 1, 24, 24
};
for (size_t i = 0; i < kNRows * page->row_stride; ++i) {
ASSERT_EQ(solution[i], gidx[i]);
}
}
TEST(EllpackPage, FromCategoricalBasic) {
using common::AsCat;
size_t constexpr kRows = 1000, kCats = 13, kCols = 1;
size_t max_bins = 8;
auto x = GenerateRandomCategoricalSingleColumn(kRows, kCats);
auto m = GetDMatrixFromData(x, kRows, 1);
auto& h_ft = m->Info().feature_types.HostVector();
h_ft.resize(kCols, FeatureType::kCategorical);
BatchParam p(0, max_bins);
auto ellpack = EllpackPage(m.get(), p);
auto accessor = ellpack.Impl()->GetDeviceAccessor(0);
ASSERT_EQ(kCats, accessor.NumBins());
auto x_copy = x;
std::sort(x_copy.begin(), x_copy.end());
auto n_uniques = std::unique(x_copy.begin(), x_copy.end()) - x_copy.begin();
ASSERT_EQ(n_uniques, kCats);
std::vector<uint32_t> h_cuts_ptr(accessor.feature_segments.size());
dh::CopyDeviceSpanToVector(&h_cuts_ptr, accessor.feature_segments);
std::vector<float> h_cuts_values(accessor.gidx_fvalue_map.size());
dh::CopyDeviceSpanToVector(&h_cuts_values, accessor.gidx_fvalue_map);
ASSERT_EQ(h_cuts_ptr.size(), 2);
ASSERT_EQ(h_cuts_values.size(), kCats);
std::vector<common::CompressedByteT> const &h_gidx_buffer =
ellpack.Impl()->gidx_buffer.HostVector();
auto h_gidx_iter = common::CompressedIterator<uint32_t>(
h_gidx_buffer.data(), accessor.NumSymbols());
for (size_t i = 0; i < x.size(); ++i) {
auto bin = h_gidx_iter[i];
auto bin_value = h_cuts_values.at(bin);
ASSERT_EQ(AsCat(x[i]), AsCat(bin_value));
}
}
struct ReadRowFunction {
EllpackDeviceAccessor matrix;
int row;
bst_float* row_data_d;
ReadRowFunction(EllpackDeviceAccessor matrix, int row, bst_float* row_data_d)
: matrix(std::move(matrix)), row(row), row_data_d(row_data_d) {}
__device__ void operator()(size_t col) {
auto value = matrix.GetFvalue(row, col);
if (isnan(value)) {
value = -1;
}
row_data_d[col] = value;
}
};
TEST(EllpackPage, Copy) {
constexpr size_t kRows = 1024;
constexpr size_t kCols = 16;
constexpr size_t kPageSize = 1024;
// Create a DMatrix with multiple batches.
dmlc::TemporaryDirectory tmpdir;
std::unique_ptr<DMatrix>
dmat(CreateSparsePageDMatrixWithRC(kRows, kCols, kPageSize, true, tmpdir));
BatchParam param{0, 256, kPageSize};
auto page = (*dmat->GetBatches<EllpackPage>(param).begin()).Impl();
// Create an empty result page.
EllpackPageImpl result(0, page->Cuts(), page->is_dense, page->row_stride,
kRows);
// Copy batch pages into the result page.
size_t offset = 0;
for (auto& batch : dmat->GetBatches<EllpackPage>(param)) {
size_t num_elements = result.Copy(0, batch.Impl(), offset);
offset += num_elements;
}
size_t current_row = 0;
thrust::device_vector<bst_float> row_d(kCols);
thrust::device_vector<bst_float> row_result_d(kCols);
std::vector<bst_float> row(kCols);
std::vector<bst_float> row_result(kCols);
for (auto& page : dmat->GetBatches<EllpackPage>(param)) {
auto impl = page.Impl();
EXPECT_EQ(impl->base_rowid, current_row);
for (size_t i = 0; i < impl->Size(); i++) {
dh::LaunchN(0, kCols, ReadRowFunction(impl->GetDeviceAccessor(0), current_row, row_d.data().get()));
thrust::copy(row_d.begin(), row_d.end(), row.begin());
dh::LaunchN(0, kCols, ReadRowFunction(result.GetDeviceAccessor(0), current_row, row_result_d.data().get()));
thrust::copy(row_result_d.begin(), row_result_d.end(), row_result.begin());
EXPECT_EQ(row, row_result);
current_row++;
}
}
}
TEST(EllpackPage, Compact) {
constexpr size_t kRows = 16;
constexpr size_t kCols = 2;
constexpr size_t kPageSize = 1;
constexpr size_t kCompactedRows = 8;
// Create a DMatrix with multiple batches.
dmlc::TemporaryDirectory tmpdir;
std::unique_ptr<DMatrix>
dmat(CreateSparsePageDMatrixWithRC(kRows, kCols, kPageSize, true, tmpdir));
BatchParam param{0, 256, kPageSize};
auto page = (*dmat->GetBatches<EllpackPage>(param).begin()).Impl();
// Create an empty result page.
EllpackPageImpl result(0, page->Cuts(), page->is_dense, page->row_stride,
kCompactedRows);
// Compact batch pages into the result page.
std::vector<size_t> row_indexes_h {
SIZE_MAX, 0, 1, 2, SIZE_MAX, 3, SIZE_MAX, 4, 5, SIZE_MAX, 6, SIZE_MAX, 7, SIZE_MAX, SIZE_MAX,
SIZE_MAX};
thrust::device_vector<size_t> row_indexes_d = row_indexes_h;
common::Span<size_t> row_indexes_span(row_indexes_d.data().get(), kRows);
for (auto& batch : dmat->GetBatches<EllpackPage>(param)) {
result.Compact(0, batch.Impl(), row_indexes_span);
}
size_t current_row = 0;
thrust::device_vector<bst_float> row_d(kCols);
thrust::device_vector<bst_float> row_result_d(kCols);
std::vector<bst_float> row(kCols);
std::vector<bst_float> row_result(kCols);
for (auto& page : dmat->GetBatches<EllpackPage>(param)) {
auto impl = page.Impl();
EXPECT_EQ(impl->base_rowid, current_row);
for (size_t i = 0; i < impl->Size(); i++) {
size_t compacted_row = row_indexes_h[current_row];
if (compacted_row == SIZE_MAX) {
current_row++;
continue;
}
dh::LaunchN(0, kCols, ReadRowFunction(impl->GetDeviceAccessor(0), current_row, row_d.data().get()));
dh::safe_cuda (cudaDeviceSynchronize());
thrust::copy(row_d.begin(), row_d.end(), row.begin());
dh::LaunchN(0, kCols,
ReadRowFunction(result.GetDeviceAccessor(0), compacted_row, row_result_d.data().get()));
thrust::copy(row_result_d.begin(), row_result_d.end(), row_result.begin());
EXPECT_EQ(row, row_result);
current_row++;
}
}
}
} // namespace xgboost
|