hip_filename (stringlengths 5-84) | hip_content (stringlengths 79-9.69M) | cuda_filename (stringlengths 4-83) | cuda_content (stringlengths 19-9.69M)
---|---|---|---|
935fb10d72ad9a70b1c000a37f1ba726ba2dbc0f.hip | // !!! This is a file automatically generated by hipify!!!
/*
-- MAGMA (version 1.3.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
November 2012
@precisions normal s
*/
#include "common_magma.h"
#include "commonblas_s.h"
#define magmablas_sgemm_tesla magmablas_sgemm
extern "C" void
magmablas_sgemm_tesla( char TRANSA, char TRANSB, magma_int_t m, magma_int_t n, magma_int_t k,
float alpha, const float *A, magma_int_t lda,
const float *B, magma_int_t ldb,
float beta, float *C, magma_int_t ldc)
{
/* -- MAGMA (version 1.3.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
November 2012
Purpose
=======
SGEMM performs one of the matrix-matrix operations
C := alpha*op( A )*op( B ) + beta*C,
where op( X ) is one of
op( X ) = X or op( X ) = X',
alpha and beta are scalars, and A, B and C are matrices, with op( A )
an m by k matrix, op( B ) a k by n matrix and C an m by n matrix.
Parameters
==========
TRANSA - CHARACTER*1.
On entry, TRANSA specifies the form of op( A ) to be used in
the matrix multiplication as follows:
TRANSA = 'N' or 'n', op( A ) = A.
TRANSA = 'T' or 't', op( A ) = A'.
TRANSA = 'C' or 'c', op( A ) = A'.
Unchanged on exit.
TRANSB - CHARACTER*1.
On entry, TRANSB specifies the form of op( B ) to be used in
the matrix multiplication as follows:
TRANSB = 'N' or 'n', op( B ) = B.
TRANSB = 'T' or 't', op( B ) = B'.
TRANSB = 'C' or 'c', op( B ) = B'.
Unchanged on exit.
M - INTEGER.
On entry, M specifies the number of rows of the matrix
op( A ) and of the matrix C. M must be at least zero.
Unchanged on exit.
N - INTEGER.
On entry, N specifies the number of columns of the matrix
op( B ) and the number of columns of the matrix C. N must be
at least zero.
Unchanged on exit.
K - INTEGER.
On entry, K specifies the number of columns of the matrix
op( A ) and the number of rows of the matrix op( B ). K must
be at least zero.
Unchanged on exit.
ALPHA - SINGLE PRECISION.
On entry, ALPHA specifies the scalar alpha.
Unchanged on exit.
A - SINGLE PRECISION array of DIMENSION ( LDA, ka ), where ka is
k when TRANSA = 'N' or 'n', and is m otherwise.
Before entry with TRANSA = 'N' or 'n', the leading m by k
part of the array A must contain the matrix A, otherwise
the leading k by m part of the array A must contain the
matrix A.
Unchanged on exit.
LDA - INTEGER.
On entry, LDA specifies the first dimension of A as declared
in the calling (sub) program. When TRANSA = 'N' or 'n' then
LDA must be at least max( 1, m ), otherwise LDA must be at
least max( 1, k ).
Unchanged on exit.
B - SINGLE PRECISION array of DIMENSION ( LDB, kb ), where kb is
n when TRANSB = 'N' or 'n', and is k otherwise.
Before entry with TRANSB = 'N' or 'n', the leading k by n
part of the array B must contain the matrix B, otherwise
the leading n by k part of the array B must contain the
matrix B.
Unchanged on exit.
LDB - INTEGER.
On entry, LDB specifies the first dimension of B as declared
in the calling (sub) program. When TRANSB = 'N' or 'n' then
LDB must be at least max( 1, k ), otherwise LDB must be at
least max( 1, n ).
Unchanged on exit.
BETA - SINGLE PRECISION.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then C need not be set on input.
Unchanged on exit.
C - SINGLE PRECISION array of DIMENSION ( LDC, n ).
Before entry, the leading m by n part of the array C must
contain the matrix C, except when beta is zero, in which
case C need not be set on entry.
On exit, the array C is overwritten by the m by n matrix
( alpha*op( A )*op( B ) + beta*C ).
LDC - INTEGER.
On entry, LDC specifies the first dimension of C as declared
in the calling (sub) program. LDC must be at least
max( 1, m ).
Unchanged on exit.
===================================================================== */
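/* Example (hypothetical device pointers and leading dimensions, not taken from the
original source): computing C := A*B in single precision on the GPU through the
magmablas_sgemm alias defined above,
magmablas_sgemm( 'N', 'N', m, n, k,
1.0f, d_A, ldda,
d_B, lddb,
0.0f, d_C, lddc );
where d_A, d_B and d_C are column-major device arrays allocated and filled beforehand. */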
if(m==0 || n==0 || ( ( alpha==0 || k==0 ) && beta ==1 ) ){
return ;
}
TRANSA = toupper( TRANSA ) ;
TRANSB = toupper( TRANSB ) ;
if( alpha == 0.0){
if( beta == 0.0){
magmablas_sgemm_kernel_ab_0( C,A,B, m, n,k,lda,ldb, ldc, alpha, beta);
return ;
}
else{
magmablas_sgemm_kernel_a_0( C,A,B, m, n,k,lda,ldb, ldc, alpha, beta);
return ;
}
}
if(ldc < m ) return ;
if(TRANSA=='N'){
if(TRANSB=='N')
{
if(lda < m ) return ;
if(ldb < k ) return ;
/*=======================================================================
===================C = alpha * A * B + beta * C =======================
=======================================================================*/
if( m > 512 && n > 512 ){
if( m % 64 == 0 && n%16 == 0 && k%16 == 0 )
magmablas_sgemm_kernel_N_N_64_16_16_16_4_special( C,A,B,
m, n,k,lda,ldb, ldc, alpha, beta);
else
magmablas_sgemm_kernel_N_N_64_16_16_16_4(C,A,B, m, n, k,
lda,ldb, ldc, alpha, beta);
}
else{
hipblasSgemm(TRANSA, TRANSB, m, n, k,
alpha, A, lda, B, ldb, beta, C, ldc );
}
}
else{
if(lda < m ) return ;
if(ldb < n ) return ;
/*=======================================================================
===================C = alpha * A * B^T + beta * C======================
=======================================================================*/
if( m > 512 && n > 512 ){
if( m%64 == 0 && n %16 ==0 && k%4==0)
magmablas_sgemm_kernel_N_T_64_16_4_16_4(C,A,B, m, n, k,
lda,ldb, ldc, alpha, beta);
else
magmablas_sgemm_kernel_N_T_64_16_4_16_4(C,A,B, m, n, k,
lda,ldb, ldc, alpha, beta);
}
else{
hipblasSgemm(TRANSA, TRANSB, m, n, k,
alpha, A, lda, B, ldb, beta, C, ldc );
}
}
}
else{
if(TRANSB=='N'){
if(lda < k ) return ;
if(ldb < k ) return ;
/*=======================================================================
===================C = alpha * A^T * B + beta * C======================
=======================================================================*/
if(m>512 && n > 512){
if( m%32 == 0 && n %32 ==0 && k%8==0)
magmablas_sgemm_kernel_T_N_32_32_8_8_8(C,A,B, m, n, k,
lda,ldb, ldc, alpha, beta);
else
magmablas_sgemm_kernel_T_N_32_32_8_8_8(C,A,B, m, n, k,
lda,ldb, ldc, alpha, beta);
}
else{
hipblasSgemm(TRANSA, TRANSB, m, n, k,
alpha, A, lda, B, ldb, beta, C, ldc );
}
}
else{
if(lda < k) return ;
if(ldb < n ) return ;
/*=======================================================================
===================C = alpha * A^T* B^T + beta * C=====================
=======================================================================*/
if( m > 512 && n > 512 ){
if( m%64 == 0 && n %16 ==0 && k%16==0)
magmablas_sgemm_kernel_T_T_64_16_16_16_4(C,B,A, n, m, k,
ldb,lda, ldc, alpha, beta);
else
magmablas_sgemm_kernel_T_T_64_16_16_16_4(C,B,A, n, m, k,
ldb,lda, ldc, alpha, beta);
}
else{
hipblasSgemm(TRANSA, TRANSB, m, n, k,
alpha, A, lda, B, ldb, beta, C, ldc );
}
}
}
}
| 935fb10d72ad9a70b1c000a37f1ba726ba2dbc0f.cu | /*
-- MAGMA (version 1.3.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
November 2012
@precisions normal s
*/
#include "common_magma.h"
#include "commonblas_s.h"
#define magmablas_sgemm_tesla magmablas_sgemm
extern "C" void
magmablas_sgemm_tesla( char TRANSA, char TRANSB, magma_int_t m, magma_int_t n, magma_int_t k,
float alpha, const float *A, magma_int_t lda,
const float *B, magma_int_t ldb,
float beta, float *C, magma_int_t ldc)
{
/* -- MAGMA (version 1.3.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
November 2012
Purpose
=======
SGEMM performs one of the matrix-matrix operations
C := alpha*op( A )*op( B ) + beta*C,
where op( X ) is one of
op( X ) = X or op( X ) = X',
alpha and beta are scalars, and A, B and C are matrices, with op( A )
an m by k matrix, op( B ) a k by n matrix and C an m by n matrix.
Parameters
==========
TRANSA - CHARACTER*1.
On entry, TRANSA specifies the form of op( A ) to be used in
the matrix multiplication as follows:
TRANSA = 'N' or 'n', op( A ) = A.
TRANSA = 'T' or 't', op( A ) = A'.
TRANSA = 'C' or 'c', op( A ) = A'.
Unchanged on exit.
TRANSB - CHARACTER*1.
On entry, TRANSB specifies the form of op( B ) to be used in
the matrix multiplication as follows:
TRANSB = 'N' or 'n', op( B ) = B.
TRANSB = 'T' or 't', op( B ) = B'.
TRANSB = 'C' or 'c', op( B ) = B'.
Unchanged on exit.
M - INTEGER.
On entry, M specifies the number of rows of the matrix
op( A ) and of the matrix C. M must be at least zero.
Unchanged on exit.
N - INTEGER.
On entry, N specifies the number of columns of the matrix
op( B ) and the number of columns of the matrix C. N must be
at least zero.
Unchanged on exit.
K - INTEGER.
On entry, K specifies the number of columns of the matrix
op( A ) and the number of rows of the matrix op( B ). K must
be at least zero.
Unchanged on exit.
ALPHA - SINGLE PRECISION.
On entry, ALPHA specifies the scalar alpha.
Unchanged on exit.
A - SINGLE PRECISION array of DIMENSION ( LDA, ka ), where ka is
k when TRANSA = 'N' or 'n', and is m otherwise.
Before entry with TRANSA = 'N' or 'n', the leading m by k
part of the array A must contain the matrix A, otherwise
the leading k by m part of the array A must contain the
matrix A.
Unchanged on exit.
LDA - INTEGER.
On entry, LDA specifies the first dimension of A as declared
in the calling (sub) program. When TRANSA = 'N' or 'n' then
LDA must be at least max( 1, m ), otherwise LDA must be at
least max( 1, k ).
Unchanged on exit.
B - SINGLE PRECISION array of DIMENSION ( LDB, kb ), where kb is
n when TRANSB = 'N' or 'n', and is k otherwise.
Before entry with TRANSB = 'N' or 'n', the leading k by n
part of the array B must contain the matrix B, otherwise
the leading n by k part of the array B must contain the
matrix B.
Unchanged on exit.
LDB - INTEGER.
On entry, LDB specifies the first dimension of B as declared
in the calling (sub) program. When TRANSB = 'N' or 'n' then
LDB must be at least max( 1, k ), otherwise LDB must be at
least max( 1, n ).
Unchanged on exit.
BETA - SINGLE PRECISION.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then C need not be set on input.
Unchanged on exit.
C - SINGLE PRECISION array of DIMENSION ( LDC, n ).
Before entry, the leading m by n part of the array C must
contain the matrix C, except when beta is zero, in which
case C need not be set on entry.
On exit, the array C is overwritten by the m by n matrix
( alpha*op( A )*op( B ) + beta*C ).
LDC - INTEGER.
On entry, LDC specifies the first dimension of C as declared
in the calling (sub) program. LDC must be at least
max( 1, m ).
Unchanged on exit.
===================================================================== */
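/* Example (hypothetical device pointers and leading dimensions, not taken from the
original source): computing C := A*B in single precision on the GPU through the
magmablas_sgemm alias defined above,
magmablas_sgemm( 'N', 'N', m, n, k,
1.0f, d_A, ldda,
d_B, lddb,
0.0f, d_C, lddc );
where d_A, d_B and d_C are column-major device arrays allocated and filled beforehand. */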
if(m==0 || n==0 || ( ( alpha==0 || k==0 ) && beta ==1 ) ){
return ;
}
TRANSA = toupper( TRANSA ) ;
TRANSB = toupper( TRANSB ) ;
if( alpha == 0.0){
if( beta == 0.0){
magmablas_sgemm_kernel_ab_0( C,A,B, m, n,k,lda,ldb, ldc, alpha, beta);
return ;
}
else{
magmablas_sgemm_kernel_a_0( C,A,B, m, n,k,lda,ldb, ldc, alpha, beta);
return ;
}
}
if(ldc < m ) return ;
if(TRANSA=='N'){
if(TRANSB=='N')
{
if(lda < m ) return ;
if(ldb < k ) return ;
/*=======================================================================
===================C = alpha * A * B + beta * C =======================
=======================================================================*/
if( m > 512 && n > 512 ){
if( m % 64 == 0 && n%16 == 0 && k%16 == 0 )
magmablas_sgemm_kernel_N_N_64_16_16_16_4_special( C,A,B,
m, n,k,lda,ldb, ldc, alpha, beta);
else
magmablas_sgemm_kernel_N_N_64_16_16_16_4(C,A,B, m, n, k,
lda,ldb, ldc, alpha, beta);
}
else{
cublasSgemm(TRANSA, TRANSB, m, n, k,
alpha, A, lda, B, ldb, beta, C, ldc );
}
}
else{
if(lda < m ) return ;
if(ldb < n ) return ;
/*=======================================================================
===================C = alpha * A * B^T + beta * C======================
=======================================================================*/
if( m > 512 && n > 512 ){
if( m%64 == 0 && n %16 ==0 && k%4==0)
magmablas_sgemm_kernel_N_T_64_16_4_16_4(C,A,B, m, n, k,
lda,ldb, ldc, alpha, beta);
else
magmablas_sgemm_kernel_N_T_64_16_4_16_4(C,A,B, m, n, k,
lda,ldb, ldc, alpha, beta);
}
else{
cublasSgemm(TRANSA, TRANSB, m, n, k,
alpha, A, lda, B, ldb, beta, C, ldc );
}
}
}
else{
if(TRANSB=='N'){
if(lda < k ) return ;
if(ldb < k ) return ;
/*=======================================================================
===================C = alpha * A^T * B + beta * C======================
=======================================================================*/
if(m>512 && n > 512){
if( m%32 == 0 && n %32 ==0 && k%8==0)
magmablas_sgemm_kernel_T_N_32_32_8_8_8(C,A,B, m, n, k,
lda,ldb, ldc, alpha, beta);
else
magmablas_sgemm_kernel_T_N_32_32_8_8_8(C,A,B, m, n, k,
lda,ldb, ldc, alpha, beta);
}
else{
cublasSgemm(TRANSA, TRANSB, m, n, k,
alpha, A, lda, B, ldb, beta, C, ldc );
}
}
else{
if(lda < k) return ;
if(ldb < n ) return ;
/*=======================================================================
===================C = alpha * A^T* B^T + beta * C=====================
=======================================================================*/
if( m > 512 && n > 512 ){
if( m%64 == 0 && n %16 ==0 && k%16==0)
magmablas_sgemm_kernel_T_T_64_16_16_16_4(C,B,A, n, m, k,
ldb,lda, ldc, alpha, beta);
else
magmablas_sgemm_kernel_T_T_64_16_16_16_4(C,B,A, n, m, k,
ldb,lda, ldc, alpha, beta);
}
else{
cublasSgemm(TRANSA, TRANSB, m, n, k,
alpha, A, lda, B, ldb, beta, C, ldc );
}
}
}
}
|
39fc90e524acc5674af6ffe2abed4b9cfc4770e7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C"
#include <stdio.h>
__global__ void test2(int n, double *a)
{
int i = blockIdx.x * blockDim.x + threadIdx.x; //output x
if(i < n){
a[i] = i;
}
}
| 39fc90e524acc5674af6ffe2abed4b9cfc4770e7.cu | extern "C"
#include <stdio.h>
__global__ void test2(int n, double *a)
{
int i = blockIdx.x * blockDim.x + threadIdx.x; //output x
if(i < n){
a[i] = i;
}
}
|
5e639b5bba23ef508c0e3cc02ba342a45b8394aa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <limits>
using namespace std;
// Constants
const double MENOS_INFINITO = -numeric_limits<double>::max();
const size_t BLOCK_SIZE = 128;
__device__ unsigned int contadorBloques = 0;
__device__ double logaritmoDeterminante(double *g_L, const size_t k, const size_t numDimensiones)
{
double suma = 0.0;
for (size_t j = 0; j < numDimensiones; j++) {
suma += log(g_L[k * numDimensiones * numDimensiones + j * numDimensiones + j]);
}
return 2.0 * suma;
}
template <size_t blockSize>
__device__ void reducirBloque(volatile double *sharedData, double suma, const size_t tid)
{
sharedData[tid] = suma;
__syncthreads();
if (blockSize >= 512) {
if (tid < 256) {
sharedData[tid] = suma = suma + sharedData[tid + 256];
}
__syncthreads();
}
if (blockSize >= 256) {
if (tid < 128) {
sharedData[tid] = suma = suma + sharedData[tid + 128];
}
__syncthreads();
}
if (blockSize >= 128) {
if (tid < 64) {
sharedData[tid] = suma = suma + sharedData[tid + 64];
}
__syncthreads();
}
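// Within the final warp (tid < 32) no __syncthreads() is issued: the code relies on the
// classic warp-synchronous pattern with a volatile shared-memory pointer. On GPUs without
// implicit warp lockstep (Volta and newer) this pattern additionally needs __syncwarp()
// between steps to remain correct.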
if (tid < 32) {
if (blockSize >= 64) {
sharedData[tid] = suma = suma + sharedData[tid + 32];
}
if (blockSize >= 32) {
sharedData[tid] = suma = suma + sharedData[tid + 16];
}
if (blockSize >= 16) {
sharedData[tid] = suma = suma + sharedData[tid + 8];
}
if (blockSize >= 8) {
sharedData[tid] = suma = suma + sharedData[tid + 4];
}
if (blockSize >= 4) {
sharedData[tid] = suma = suma + sharedData[tid + 2];
}
if (blockSize >= 2) {
sharedData[tid] = suma = suma + sharedData[tid + 1];
}
}
}
template <size_t blockSize, typename Predicate, typename Predicate2>
__device__ void reducirFinal(Predicate valor, Predicate2 direccionResultado, volatile double *sharedData, size_t numTrozos)
{
const size_t tid = threadIdx.x;
double suma = 0.0;
int i = tid;
while (i < numTrozos)
{
suma += *(valor(i));
i += blockSize;
}
reducirBloque<blockSize>(sharedData, suma, tid);
if (tid == 0) {
*(direccionResultado()) = sharedData[0];
}
}
template <size_t blockSize, typename Predicate, typename Predicate2, typename Predicate3>
__device__ void reducir(Predicate valor, Predicate2 direccionResultado, Predicate3 reduccionFinal, const size_t n, volatile double *sharedData, const size_t numBloques)
{
__shared__ bool esUltimoBloque;
const size_t tid = threadIdx.x;
const size_t gridSize = (blockSize * 2) * gridDim.x;
size_t i = blockIdx.x * (blockSize * 2) + threadIdx.x;
double suma = 0.0;
while (i < n) {
suma += valor(i);
if (i + blockSize < n) {
suma += valor(i+blockSize);
}
i += gridSize;
}
reducirBloque<blockSize>(sharedData, suma, tid);
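// Thread 0 publishes this block's partial sum, then draws a ticket from the global counter
// with atomicInc. The __threadfence() guarantees the partial result is visible to other
// blocks before the ticket is taken, so the block that receives the last ticket can safely
// run the final reduction over all per-block results.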
if (tid == 0) {
*(direccionResultado()) = sharedData[0];
__threadfence();
unsigned int ticket = atomicInc(&contadorBloques, numBloques);
esUltimoBloque = (ticket == numBloques - 1);
}
__syncthreads();
if (esUltimoBloque) {
reduccionFinal();
if (tid == 0) {
contadorBloques = 0;
}
}
}
| 5e639b5bba23ef508c0e3cc02ba342a45b8394aa.cu | #include <limits>
using namespace std;
// Constants
const double MENOS_INFINITO = -numeric_limits<double>::max();
const size_t BLOCK_SIZE = 128;
__device__ unsigned int contadorBloques = 0;
__device__ double logaritmoDeterminante(double *g_L, const size_t k, const size_t numDimensiones)
{
double suma = 0.0;
for (size_t j = 0; j < numDimensiones; j++) {
suma += log(g_L[k * numDimensiones * numDimensiones + j * numDimensiones + j]);
}
return 2.0 * suma;
}
template <size_t blockSize>
__device__ void reducirBloque(volatile double *sharedData, double suma, const size_t tid)
{
sharedData[tid] = suma;
__syncthreads();
if (blockSize >= 512) {
if (tid < 256) {
sharedData[tid] = suma = suma + sharedData[tid + 256];
}
__syncthreads();
}
if (blockSize >= 256) {
if (tid < 128) {
sharedData[tid] = suma = suma + sharedData[tid + 128];
}
__syncthreads();
}
if (blockSize >= 128) {
if (tid < 64) {
sharedData[tid] = suma = suma + sharedData[tid + 64];
}
__syncthreads();
}
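// Within the final warp (tid < 32) no __syncthreads() is issued: the code relies on the
// classic warp-synchronous pattern with a volatile shared-memory pointer. On GPUs without
// implicit warp lockstep (Volta and newer) this pattern additionally needs __syncwarp()
// between steps to remain correct.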
if (tid < 32) {
if (blockSize >= 64) {
sharedData[tid] = suma = suma + sharedData[tid + 32];
}
if (blockSize >= 32) {
sharedData[tid] = suma = suma + sharedData[tid + 16];
}
if (blockSize >= 16) {
sharedData[tid] = suma = suma + sharedData[tid + 8];
}
if (blockSize >= 8) {
sharedData[tid] = suma = suma + sharedData[tid + 4];
}
if (blockSize >= 4) {
sharedData[tid] = suma = suma + sharedData[tid + 2];
}
if (blockSize >= 2) {
sharedData[tid] = suma = suma + sharedData[tid + 1];
}
}
}
template <size_t blockSize, typename Predicate, typename Predicate2>
__device__ void reducirFinal(Predicate valor, Predicate2 direccionResultado, volatile double *sharedData, size_t numTrozos)
{
const size_t tid = threadIdx.x;
double suma = 0.0;
int i = tid;
while (i < numTrozos)
{
suma += *(valor(i));
i += blockSize;
}
reducirBloque<blockSize>(sharedData, suma, tid);
if (tid == 0) {
*(direccionResultado()) = sharedData[0];
}
}
template <size_t blockSize, typename Predicate, typename Predicate2, typename Predicate3>
__device__ void reducir(Predicate valor, Predicate2 direccionResultado, Predicate3 reduccionFinal, const size_t n, volatile double *sharedData, const size_t numBloques)
{
__shared__ bool esUltimoBloque;
const size_t tid = threadIdx.x;
const size_t gridSize = (blockSize * 2) * gridDim.x;
size_t i = blockIdx.x * (blockSize * 2) + threadIdx.x;
double suma = 0.0;
while (i < n) {
suma += valor(i);
if (i + blockSize < n) {
suma += valor(i+blockSize);
}
i += gridSize;
}
reducirBloque<blockSize>(sharedData, suma, tid);
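// Thread 0 publishes this block's partial sum, then draws a ticket from the global counter
// with atomicInc. The __threadfence() guarantees the partial result is visible to other
// blocks before the ticket is taken, so the block that receives the last ticket can safely
// run the final reduction over all per-block results.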
if (tid == 0) {
*(direccionResultado()) = sharedData[0];
__threadfence();
unsigned int ticket = atomicInc(&contadorBloques, numBloques);
esUltimoBloque = (ticket == numBloques - 1);
}
__syncthreads();
if (esUltimoBloque) {
reduccionFinal();
if (tid == 0) {
contadorBloques = 0;
}
}
}
|
aa58b75425815ca8cb7de662cbe1e585dde2548f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
int N = 2<<24;
int size = N * sizeof(int);
int *host_array;
int *device_array;
hipHostMalloc(&host_array, size); // Pinned host memory allocation.
hipMalloc(&device_array, size); // Allocation directly on the active GPU device.
initializeData(host_array, N); // Assume this application needs to initialize on the host.
const int numberOfSegments = 4; // This example demonstrates slicing the work into 4 segments.
int segmentN = N / numberOfSegments; // A value for a segment's worth of `N` is needed.
size_t segmentSize = size / numberOfSegments; // A value for a segment's worth of `size` is needed.
// For each of the 4 segments...
for (int i = 0; i < numberOfSegments; ++i)
{
// Calculate the index where this particular segment should operate within the larger arrays.
int segmentOffset = i * segmentN;
// Create a stream for this segment's worth of copy and work.
hipStream_t stream;
hipStreamCreate(&stream);
// Asynchronously copy segment's worth of pinned host memory to device over non-default stream.
hipMemcpyAsync(&device_array[segmentOffset], // Take care to access correct location in array.
&host_array[segmentOffset], // Take care to access correct location in array.
segmentSize, // Only copy a segment's worth of memory.
hipMemcpyHostToDevice,
stream); // Provide optional argument for non-default stream.
// Execute segment's worth of work over same non-default stream as memory copy.
hipLaunchKernelGGL(( kernel), dim3(number_of_blocks), dim3(threads_per_block), 0, stream, &device_array[segmentOffset], segmentN);
// `hipStreamDestroy` will return immediately (is non-blocking), but will not actually destroy stream until
// all stream operations are complete.
hipStreamDestroy(stream);
} | aa58b75425815ca8cb7de662cbe1e585dde2548f.cu | int N = 2<<24;
int size = N * sizeof(int);
int *host_array;
int *device_array;
cudaMallocHost(&host_array, size); // Pinned host memory allocation.
cudaMalloc(&device_array, size); // Allocation directly on the active GPU device.
initializeData(host_array, N); // Assume this application needs to initialize on the host.
const int numberOfSegments = 4; // This example demonstrates slicing the work into 4 segments.
int segmentN = N / numberOfSegments; // A value for a segment's worth of `N` is needed.
size_t segmentSize = size / numberOfSegments; // A value for a segment's worth of `size` is needed.
// For each of the 4 segments...
for (int i = 0; i < numberOfSegments; ++i)
{
// Calculate the index where this particular segment should operate within the larger arrays.
int segmentOffset = i * segmentN;
// Create a stream for this segment's worth of copy and work.
cudaStream_t stream;
cudaStreamCreate(&stream);
// Asynchronously copy segment's worth of pinned host memory to device over non-default stream.
cudaMemcpyAsync(&device_array[segmentOffset], // Take care to access correct location in array.
&host_array[segmentOffset], // Take care to access correct location in array.
segmentSize, // Only copy a segment's worth of memory.
cudaMemcpyHostToDevice,
stream); // Provide optional argument for non-default stream.
// Execute segment's worth of work over same non-default stream as memory copy.
kernel<<<number_of_blocks, threads_per_block, 0, stream>>>(&device_array[segmentOffset], segmentN);
// `cudaStreamDestroy` will return immediately (is non-blocking), but will not actually destroy stream until
// all stream operations are complete.
cudaStreamDestroy(stream);
} |
reorder_one_bit.hip | // !!! This is a file automatically generated by hipify!!!
#include "reorder_one_bit.cuh"
#include "reorder_one_bit_impl.cuh"
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
#include <library/cpp/cuda/wrappers/cub_include.h>
#include _CUB_INCLUDE(hipcub/hipcub.hpp)
namespace NKernel {
template <class T>
void ReorderOneBit(
ui32 size,
TReorderOneBitContext<ui32, T> context,
ui32* keys,
T* values,
int bit,
TCudaStream stream) {
if (size) {
hipMemcpyAsync(context.TempValues.Get(), values, sizeof(T) * size, hipMemcpyDefault, stream);
hipMemcpyAsync(context.TempKeys.Get(), keys, sizeof(ui32) * size, hipMemcpyDefault, stream);
{
using TInput = TScanBitIterator<ui32>;
TInput inputIter(context.TempKeys.Get(), bit);
hipcub::DeviceScan::ExclusiveSum < TInput, int*> (context.ScanTempBuffer.Get(),
context.ScanTempBufferSize,
inputIter,
context.Offsets.Get(),
size,
stream);
}
const int blockSize = 512;
const int N = 1;
const int numBlocks = (size + (N * blockSize) - 1) / (N * blockSize);
ReorderOneBitImpl<ui32, ui32, N, blockSize> << < numBlocks, blockSize, 0, stream >> > (
context.TempKeys,
context.TempValues,
context.Offsets,
bit,
keys,
values,
size);
}
}
ui64 ReorderBitTempSize(ui32 size) {
ui64 sizeInBytes = 0;
using TInput = TScanBitIterator<ui32>;
TInput fakeInput(nullptr, 0);
hipcub::DeviceScan::ExclusiveSum< TInput, int * > (nullptr,
sizeInBytes,
fakeInput,
nullptr,
size);
return sizeInBytes;
}
template void ReorderOneBit<ui32>(
ui32 size,
TReorderOneBitContext<ui32, ui32> context,
ui32* keys,
ui32* values,
int bit,
TCudaStream stream);
}
| reorder_one_bit.cu | #include "reorder_one_bit.cuh"
#include "reorder_one_bit_impl.cuh"
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
#include <library/cpp/cuda/wrappers/cub_include.h>
#include _CUB_INCLUDE(cub/device/device_scan.cuh)
namespace NKernel {
template <class T>
void ReorderOneBit(
ui32 size,
TReorderOneBitContext<ui32, T> context,
ui32* keys,
T* values,
int bit,
TCudaStream stream) {
if (size) {
cudaMemcpyAsync(context.TempValues.Get(), values, sizeof(T) * size, cudaMemcpyDefault, stream);
cudaMemcpyAsync(context.TempKeys.Get(), keys, sizeof(ui32) * size, cudaMemcpyDefault, stream);
{
using TInput = TScanBitIterator<ui32>;
TInput inputIter(context.TempKeys.Get(), bit);
cub::DeviceScan::ExclusiveSum < TInput, int*> (context.ScanTempBuffer.Get(),
context.ScanTempBufferSize,
inputIter,
context.Offsets.Get(),
size,
stream);
}
const int blockSize = 512;
const int N = 1;
const int numBlocks = (size + (N * blockSize) - 1) / (N * blockSize);
ReorderOneBitImpl<ui32, ui32, N, blockSize> << < numBlocks, blockSize, 0, stream >> > (
context.TempKeys,
context.TempValues,
context.Offsets,
bit,
keys,
values,
size);
}
}
ui64 ReorderBitTempSize(ui32 size) {
ui64 sizeInBytes = 0;
using TInput = TScanBitIterator<ui32>;
TInput fakeInput(nullptr, 0);
cub::DeviceScan::ExclusiveSum< TInput, int * > (nullptr,
sizeInBytes,
fakeInput,
nullptr,
size);
return sizeInBytes;
}
template void ReorderOneBit<ui32>(
ui32 size,
TReorderOneBitContext<ui32, ui32> context,
ui32* keys,
ui32* values,
int bit,
TCudaStream stream);
}
|
09005da9997acb136dc70f91b8955ab6307070fb.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <hip/hip_runtime_api.h>
#define BASE_TYPE float
__global__ void add(BASE_TYPE *a, BASE_TYPE *b, BASE_TYPE *result, const int N)
{
int threads_count = blockDim.x * gridDim.x;
int elem_per_thread = N / threads_count;
int k = (blockIdx.x * blockDim.x + threadIdx.x) * elem_per_thread;
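// Each thread handles a contiguous run of elem_per_thread consecutive elements starting at k.
// This assumes N is a multiple of blockDim.x * gridDim.x; any remainder elements would be
// left unprocessed.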
for (int i = k; i < k + elem_per_thread; i++)
{
result[i] = a[i] + b[i];
}
}
BASE_TYPE* gen_array(const int N)
{
BASE_TYPE *a = new BASE_TYPE[N];
for (int i = 0; i < N; i++)
a[i] = rand() % 100;
return a;
}
void print_array(const BASE_TYPE *a, const int N)
{
for (int i = 0; i < N; i++)
printf("%3.0f ", a[i]);
printf("\n");
}
void cuda_init_array(BASE_TYPE **dev, const BASE_TYPE *host, const size_t size)
{
hipError_t err;
err = hipMalloc((void **)dev, size);
if (err != hipSuccess)
throw err;
if (host != NULL)
{
err = hipMemcpy(*dev, host, size, hipMemcpyHostToDevice);
if (err != hipSuccess)
throw err;
}
}
void cuda_init_grid_and_block(dim3 *grid, dim3 *block, const int N, const int threads_per_block)
{
int blocks_count = N / threads_per_block;
*grid = dim3(blocks_count);
*block = dim3(threads_per_block);
printf("Block %d %d %d\n", block->x, block->y, block->z);
printf("Grid %d %d %d\n", grid->x, grid->y, grid->z);
}
int main()
{
srand(time(NULL));
const int N = 32768;
const size_t size = N * sizeof(BASE_TYPE);
int threads_per_block;
scanf("%d", &threads_per_block);
dim3 threadsPerBlock, blocksPerGrid;
cuda_init_grid_and_block(&threadsPerBlock, &blocksPerGrid, N, threads_per_block);
hipEvent_t start, stop;
float h2d_cp_span, d2h_cp_span, k_span;
hipEventCreate(&start);
hipEventCreate(&stop);
BASE_TYPE *host_a = gen_array(N), *host_b = gen_array(N), *host_c = new BASE_TYPE[N];
BASE_TYPE *dev_a, *dev_b, *dev_c;
if (host_a == NULL || host_b == NULL || host_c == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
hipEventRecord(start, 0);
try
{
cuda_init_array(&dev_a, host_a, size);
cuda_init_array(&dev_b, host_b, size);
cuda_init_array(&dev_c, NULL, size);
}
catch (hipError_t err)
{
fprintf(stderr, "Failed to allocate device (error code: %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&h2d_cp_span, start, stop);
for(int i = 0; i < 100; i++)
hipLaunchKernelGGL(( add), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_a, dev_b, dev_c, N);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&k_span, start, stop);
hipMemcpy(host_c, dev_c, size, hipMemcpyDeviceToHost);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&d2h_cp_span, start, stop);
// printf("Copy form host to device time: %.2f milliseconds\n", h2d_cp_span);
printf("Run kernel time: %.2f milliseconds\n", (k_span - h2d_cp_span) / 100);
// printf("Copy form device to host time: %.2f milliseconds\n", d2h_cp_span);
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
delete[] host_a;
delete[] host_b;
delete[] host_c;
return 0;
} | 09005da9997acb136dc70f91b8955ab6307070fb.cu | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <cuda_runtime_api.h>
#define BASE_TYPE float
__global__ void add(BASE_TYPE *a, BASE_TYPE *b, BASE_TYPE *result, const int N)
{
int threads_count = blockDim.x * gridDim.x;
int elem_per_thread = N / threads_count;
int k = (blockIdx.x * blockDim.x + threadIdx.x) * elem_per_thread;
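// Each thread handles a contiguous run of elem_per_thread consecutive elements starting at k.
// This assumes N is a multiple of blockDim.x * gridDim.x; any remainder elements would be
// left unprocessed.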
for (int i = k; i < k + elem_per_thread; i++)
{
result[i] = a[i] + b[i];
}
}
BASE_TYPE* gen_array(const int N)
{
BASE_TYPE *a = new BASE_TYPE[N];
for (int i = 0; i < N; i++)
a[i] = rand() % 100;
return a;
}
void print_array(const BASE_TYPE *a, const int N)
{
for (int i = 0; i < N; i++)
printf("%3.0f ", a[i]);
printf("\n");
}
void cuda_init_array(BASE_TYPE **dev, const BASE_TYPE *host, const size_t size)
{
cudaError_t err;
err = cudaMalloc((void **)dev, size);
if (err != cudaSuccess)
throw err;
if (host != NULL)
{
err = cudaMemcpy(*dev, host, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
throw err;
}
}
void cuda_init_grid_and_block(dim3 *grid, dim3 *block, const int N, const int threads_per_block)
{
int blocks_count = N / threads_per_block;
*grid = dim3(blocks_count);
*block = dim3(threads_per_block);
printf("Block %d %d %d\n", block->x, block->y, block->z);
printf("Grid %d %d %d\n", grid->x, grid->y, grid->z);
}
int main()
{
srand(time(NULL));
const int N = 32768;
const size_t size = N * sizeof(BASE_TYPE);
int threads_per_block;
scanf("%d", &threads_per_block);
dim3 threadsPerBlock, blocksPerGrid;
cuda_init_grid_and_block(&threadsPerBlock, &blocksPerGrid, N, threads_per_block);
cudaEvent_t start, stop;
float h2d_cp_span, d2h_cp_span, k_span;
cudaEventCreate(&start);
cudaEventCreate(&stop);
BASE_TYPE *host_a = gen_array(N), *host_b = gen_array(N), *host_c = new BASE_TYPE[N];
BASE_TYPE *dev_a, *dev_b, *dev_c;
if (host_a == NULL || host_b == NULL || host_c == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
cudaEventRecord(start, 0);
try
{
cuda_init_array(&dev_a, host_a, size);
cuda_init_array(&dev_b, host_b, size);
cuda_init_array(&dev_c, NULL, size);
}
catch (cudaError_t err)
{
fprintf(stderr, "Failed to allocate device (error code: %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&h2d_cp_span, start, stop);
for(int i = 0; i < 100; i++)
add<<<blocksPerGrid, threadsPerBlock>>>(dev_a, dev_b, dev_c, N);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&k_span, start, stop);
cudaMemcpy(host_c, dev_c, size, cudaMemcpyDeviceToHost);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&d2h_cp_span, start, stop);
// printf("Copy form host to device time: %.2f milliseconds\n", h2d_cp_span);
printf("Run kernel time: %.2f milliseconds\n", (k_span - h2d_cp_span) / 100);
// printf("Copy form device to host time: %.2f milliseconds\n", d2h_cp_span);
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
delete[] host_a;
delete[] host_b;
delete[] host_c;
return 0;
} |
64dad87b3a84cc93105f4f55203e2833ed311dd7.hip | // !!! This is a file automatically generated by hipify!!!
#include "option.h"
#include <opencv2/opencv.hpp>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#ifdef PARALLEL
#define HEIGHT 1024
#define WIDTH 1024
#define CHANNELS_N 3
#define BLOCKS_N 16
#define BLOCK_THREADS_N 64
#define STRIP_WIDTH 30
#define STRIP_R 255
#define STRIP_G 0
#define STRIP_B 0
__global__ void GetDiagonallyStrippedFrameKernel(uchar* const d_pOriginalFrame, uchar* const d_pStrippedFrame)
{
const unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= HEIGHT) // overflow
{
return;
}
for (int j = 0; j < WIDTH; ++j)
{
if (i % STRIP_WIDTH == j % STRIP_WIDTH)
{
d_pStrippedFrame[(i * WIDTH + j) * CHANNELS_N + 0] = STRIP_B;
d_pStrippedFrame[(i * WIDTH + j) * CHANNELS_N + 1] = STRIP_G;
d_pStrippedFrame[(i * WIDTH + j) * CHANNELS_N + 2] = STRIP_R;
}
else
{
for (int k = 0; k < CHANNELS_N; ++k)
{
d_pStrippedFrame[(i * WIDTH + j) * CHANNELS_N + k] = d_pOriginalFrame[(i * WIDTH + j) * CHANNELS_N + k];
}
}
}
}
int main()
{
// read image
cv::Mat kCar = cv::imread("car.jpg");
// start time recorder
hipEvent_t kStart;
hipEvent_t kStop;
hipEventCreate(&kStart);
hipEventCreate(&kStop);
hipEventRecord(kStart, 0);
// get original frame
const unsigned int FRAME_SIZE = kCar.rows * kCar.step;
uchar* const pFrame = (uchar*)malloc(FRAME_SIZE * sizeof(uchar));
for (int i = 0; i < HEIGHT; ++i)
{
for (int j = 0; j < WIDTH; ++j)
{
for (int k = 0; k < CHANNELS_N; ++k)
{
pFrame[(i * WIDTH + j) * CHANNELS_N + k] = kCar.at<cv::Vec3b>(i, j)[k];
}
}
}
// CUDA prepare [d]evice frames
uchar* d_pOriginalFrame;
uchar* d_pStrippedFrame;
hipMalloc((void**)&d_pOriginalFrame, FRAME_SIZE * sizeof(uchar));
hipMalloc((void**)&d_pStrippedFrame, FRAME_SIZE * sizeof(uchar));
hipMemcpy(d_pOriginalFrame, pFrame, FRAME_SIZE * sizeof(uchar), hipMemcpyHostToDevice);
// CUDA get [d]evice stripped frame
hipLaunchKernelGGL(( GetDiagonallyStrippedFrameKernel), dim3(BLOCKS_N), dim3(BLOCK_THREADS_N), 0, 0, d_pOriginalFrame, d_pStrippedFrame);
// CUDA write result onto frame
hipMemcpy(pFrame, d_pStrippedFrame, FRAME_SIZE * sizeof(uchar), hipMemcpyDeviceToHost);
// CUDA free [d]evice frames
hipFree(d_pOriginalFrame);
hipFree(d_pStrippedFrame);
// load frame to image
for (int i = 0; i < HEIGHT; ++i)
{
for (int j = 0; j < WIDTH; ++j)
{
for (int k = 0; k < CHANNELS_N; ++k)
{
kCar.at<cv::Vec3b>(i, j)[k] = pFrame[(i * WIDTH + j) * CHANNELS_N + k];
}
}
}
// free frame
free(pFrame);
// stop time recorder
hipEventRecord(kStop, 0);
hipEventSynchronize(kStop);
float fTimeMs = 0.f;
hipEventElapsedTime(&fTimeMs, kStart, kStop);
hipEventDestroy(kStart);
hipEventDestroy(kStop);
printf("Process data took me %f milliseconds.\n", fTimeMs);
// show image
cv::imshow("Image Horizontal Flip", kCar);
cv::waitKey(0);
return 0;
}
#endif | 64dad87b3a84cc93105f4f55203e2833ed311dd7.cu | #include "option.h"
#include <opencv2/opencv.hpp>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#ifdef PARALLEL
#define HEIGHT 1024
#define WIDTH 1024
#define CHANNELS_N 3
#define BLOCKS_N 16
#define BLOCK_THREADS_N 64
#define STRIP_WIDTH 30
#define STRIP_R 255
#define STRIP_G 0
#define STRIP_B 0
__global__ void GetDiagonallyStrippedFrameKernel(uchar* const d_pOriginalFrame, uchar* const d_pStrippedFrame)
{
const unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= HEIGHT) // overflow
{
return;
}
for (int j = 0; j < WIDTH; ++j)
{
if (i % STRIP_WIDTH == j % STRIP_WIDTH)
{
d_pStrippedFrame[(i * WIDTH + j) * CHANNELS_N + 0] = STRIP_B;
d_pStrippedFrame[(i * WIDTH + j) * CHANNELS_N + 1] = STRIP_G;
d_pStrippedFrame[(i * WIDTH + j) * CHANNELS_N + 2] = STRIP_R;
}
else
{
for (int k = 0; k < CHANNELS_N; ++k)
{
d_pStrippedFrame[(i * WIDTH + j) * CHANNELS_N + k] = d_pOriginalFrame[(i * WIDTH + j) * CHANNELS_N + k];
}
}
}
}
int main()
{
// read image
cv::Mat kCar = cv::imread("car.jpg");
// start time recorder
cudaEvent_t kStart;
cudaEvent_t kStop;
cudaEventCreate(&kStart);
cudaEventCreate(&kStop);
cudaEventRecord(kStart, 0);
// get original frame
const unsigned int FRAME_SIZE = kCar.rows * kCar.step;
uchar* const pFrame = (uchar*)malloc(FRAME_SIZE * sizeof(uchar));
for (int i = 0; i < HEIGHT; ++i)
{
for (int j = 0; j < WIDTH; ++j)
{
for (int k = 0; k < CHANNELS_N; ++k)
{
pFrame[(i * WIDTH + j) * CHANNELS_N + k] = kCar.at<cv::Vec3b>(i, j)[k];
}
}
}
// CUDA prepare [d]evice frames
uchar* d_pOriginalFrame;
uchar* d_pStrippedFrame;
cudaMalloc((void**)&d_pOriginalFrame, FRAME_SIZE * sizeof(uchar));
cudaMalloc((void**)&d_pStrippedFrame, FRAME_SIZE * sizeof(uchar));
cudaMemcpy(d_pOriginalFrame, pFrame, FRAME_SIZE * sizeof(uchar), cudaMemcpyHostToDevice);
// CUDA get [d]evice stripped frame
GetDiagonallyStrippedFrameKernel<<<BLOCKS_N, BLOCK_THREADS_N>>>(d_pOriginalFrame, d_pStrippedFrame);
// CUDA write result onto frame
cudaMemcpy(pFrame, d_pStrippedFrame, FRAME_SIZE * sizeof(uchar), cudaMemcpyDeviceToHost);
// CUDA free [d]evice frames
cudaFree(d_pOriginalFrame);
cudaFree(d_pStrippedFrame);
// load frame to image
for (int i = 0; i < HEIGHT; ++i)
{
for (int j = 0; j < WIDTH; ++j)
{
for (int k = 0; k < CHANNELS_N; ++k)
{
kCar.at<cv::Vec3b>(i, j)[k] = pFrame[(i * WIDTH + j) * CHANNELS_N + k];
}
}
}
// free frame
free(pFrame);
// stop time recorder
cudaEventRecord(kStop, 0);
cudaEventSynchronize(kStop);
float fTimeMs = 0.f;
cudaEventElapsedTime(&fTimeMs, kStart, kStop);
cudaEventDestroy(kStart);
cudaEventDestroy(kStop);
printf("Process data took me %f milliseconds.\n", fTimeMs);
// show image
cv::imshow("Image Horizontal Flip", kCar);
cv::waitKey(0);
return 0;
}
#endif |
70abedda184e71aa936f76113075708135894693.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "common.c"
// Get the BLOCK_SIZExBLOCK_SIZE sub-matrix Asub of A that is
// located col sub-matrices to the right and row sub-matrices down
// from the upper-left corner of A
__device__ Matrix GetSubMatrix(Matrix A, int row, int col)
{
Matrix Asub;
Asub.width = BLOCK_SIZE;
Asub.height = BLOCK_SIZE;
Asub.stride = A.stride;
Asub.elements = &A.elements[A.stride * BLOCK_SIZE * row
+ BLOCK_SIZE * col];
return Asub;
}
// Matrix multiplication kernel called by MatMul()
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C)
{
// Block row and column
int blockRow = blockIdx.y;
int blockCol = blockIdx.x;
// Each thread block computes one sub-matrix Csub of C
Matrix Csub = GetSubMatrix(C, blockRow, blockCol);
// Each thread computes one element of Csub
// by accumulating results into Cvalue
float Cvalue = 0;
// Thread row and column within Csub
int row = threadIdx.y;
int col = threadIdx.x;
// Loop over all the sub-matrices of A and B that are
// required to compute Csub
// Multiply each pair of sub-matrices together
// and accumulate the results
for (int m = 0; m < (A.width / BLOCK_SIZE); ++m) {
// Get sub-matrix Asub of A
Matrix Asub = GetSubMatrix(A, blockRow, m);
// Get sub-matrix Bsub of B
Matrix Bsub = GetSubMatrix(B, m, blockCol);
// Shared memory used to store Asub and Bsub respectively
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE+1];
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE+1];
// Load Asub and Bsub from device memory to shared memory
// Each thread loads one element of each sub-matrix
As[row][col] = GetElement(Asub, row, col);
Bs[row][col] = GetElement(Bsub, row, col);
// Synchronize to make sure the sub-matrices are loaded
// before starting the computation
__syncthreads();
// Multiply Asub and Bsub together
for (int e = 0; e < BLOCK_SIZE; ++e)
Cvalue += As[row][e] * Bs[e][col];
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write Csub to device memory
// Each thread writes one element
SetElement(Csub, row, col, Cvalue);
}
| 70abedda184e71aa936f76113075708135894693.cu | #include "common.c"
// Get the BLOCK_SIZExBLOCK_SIZE sub-matrix Asub of A that is
// located col sub-matrices to the right and row sub-matrices down
// from the upper-left corner of A
__device__ Matrix GetSubMatrix(Matrix A, int row, int col)
{
Matrix Asub;
Asub.width = BLOCK_SIZE;
Asub.height = BLOCK_SIZE;
Asub.stride = A.stride;
Asub.elements = &A.elements[A.stride * BLOCK_SIZE * row
+ BLOCK_SIZE * col];
return Asub;
}
// Matrix multiplication kernel called by MatMul()
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C)
{
// Block row and column
int blockRow = blockIdx.y;
int blockCol = blockIdx.x;
// Each thread block computes one sub-matrix Csub of C
Matrix Csub = GetSubMatrix(C, blockRow, blockCol);
// Each thread computes one element of Csub
// by accumulating results into Cvalue
float Cvalue = 0;
// Thread row and column within Csub
int row = threadIdx.y;
int col = threadIdx.x;
// Loop over all the sub-matrices of A and B that are
// required to compute Csub
// Multiply each pair of sub-matrices together
// and accumulate the results
for (int m = 0; m < (A.width / BLOCK_SIZE); ++m) {
// Get sub-matrix Asub of A
Matrix Asub = GetSubMatrix(A, blockRow, m);
// Get sub-matrix Bsub of B
Matrix Bsub = GetSubMatrix(B, m, blockCol);
// Shared memory used to store Asub and Bsub respectively
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE+1];
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE+1];
// Load Asub and Bsub from device memory to shared memory
// Each thread loads one element of each sub-matrix
As[row][col] = GetElement(Asub, row, col);
Bs[row][col] = GetElement(Bsub, row, col);
// Synchronize to make sure the sub-matrices are loaded
// before starting the computation
__syncthreads();
// Multiply Asub and Bsub together
for (int e = 0; e < BLOCK_SIZE; ++e)
Cvalue += As[row][e] * Bs[e][col];
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write Csub to device memory
// Each thread writes one element
SetElement(Csub, row, col, Cvalue);
}
|
d71838347ae7ccfd8006824f14f727097750e235.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Predicts the surface, i.e. performs raycasting
// This is CUDA code; compile with nvcc
// Author: Christian Diller, [email protected]
#include "include/common.h"
using Vec3ida = Eigen::Matrix<int, 3, 1, Eigen::DontAlign>;
namespace kinectfusion {
namespace internal {
namespace cuda {
__device__ __forceinline__
// Trilinear interpolation
float interpolate_trilinearly(
const Vec3fda& point, // (non-integer) point at which the TSDF value is wanted
const PtrStepSz<short2>& volume, // TSDF Volume object
const int3& volume_size, // size of the TSDF Volume
const float voxel_scale) // metric size of one voxel of the TSDF Volume
{
// Everything in this function is expressed in (fractional) Volume coordinates.
// Convert the query point to integer voxel indices.
Vec3ida point_in_grid = point.cast<int>();
// Coordinates of the corresponding voxel centre.
const float vx = (static_cast<float>(point_in_grid.x()) + 0.5f);
const float vy = (static_cast<float>(point_in_grid.y()) + 0.5f);
const float vz = (static_cast<float>(point_in_grid.z()) + 0.5f);
// If the query point lies on the smaller side of its voxel centre along an axis, step the
// index back by one on that axis; otherwise keep it unchanged. (The original source carries
// an x-axis sketch of the two cases here.) Splitting the cases this way guarantees that the
// eight voxel centres used below always enclose the query point.
point_in_grid.x() = (point.x() < vx) ? (point_in_grid.x() - 1) : point_in_grid.x();
point_in_grid.y() = (point.y() < vy) ? (point_in_grid.y() - 1) : point_in_grid.y();
point_in_grid.z() = (point.z() < vz) ? (point_in_grid.z() - 1) : point_in_grid.z();
// The +0.5f accounts for the fact that the TSDF value stored at point_in_grid is the value at the voxel centre.
// Trilinear interpolation, ref: https://en.wikipedia.org/wiki/Trilinear_interpolation
// Offsets between the exact (floating-point) point and the enclosing voxel centre.
const float a = (point.x() - (static_cast<float>(point_in_grid.x()) + 0.5f));
const float b = (point.y() - (static_cast<float>(point_in_grid.y()) + 0.5f));
const float c = (point.z() - (static_cast<float>(point_in_grid.z()) + 0.5f));
return
static_cast<float>(volume.ptr((point_in_grid.z()) * volume_size.y + point_in_grid.y())[point_in_grid.x()].x) * DIVSHORTMAX
// volume[ x ][ y ][ z ], C000
* (1 - a) * (1 - b) * (1 - c) +
static_cast<float>(volume.ptr((point_in_grid.z() + 1) * volume_size.y + point_in_grid.y())[point_in_grid.x()].x) * DIVSHORTMAX
// volume[ x ][ y ][z+1], C001
* (1 - a) * (1 - b) * c +
static_cast<float>(volume.ptr((point_in_grid.z()) * volume_size.y + point_in_grid.y() + 1)[point_in_grid.x()].x) * DIVSHORTMAX
// volume[ x ][y+1][ z ], C010
* (1 - a) * b * (1 - c) +
static_cast<float>(volume.ptr((point_in_grid.z() + 1) * volume_size.y + point_in_grid.y() + 1)[point_in_grid.x()].x) * DIVSHORTMAX
// volume[ x ][y+1][z+1], C011
* (1 - a) * b * c +
static_cast<float>(volume.ptr((point_in_grid.z()) * volume_size.y + point_in_grid.y())[point_in_grid.x() + 1].x) * DIVSHORTMAX
// volume[x+1][ y ][ z ], C100
* a * (1 - b) * (1 - c) +
static_cast<float>(volume.ptr((point_in_grid.z() + 1) * volume_size.y + point_in_grid.y())[point_in_grid.x() + 1].x) * DIVSHORTMAX
// volume[x+1][ y ][z+1], C101
* a * (1 - b) * c +
static_cast<float>(volume.ptr((point_in_grid.z()) * volume_size.y + point_in_grid.y() + 1)[point_in_grid.x() + 1].x) * DIVSHORTMAX
// volume[x+1][y+1][ z ], C110
* a * b * (1 - c) +
static_cast<float>(volume.ptr((point_in_grid.z() + 1) * volume_size.y + point_in_grid.y() + 1)[point_in_grid.x() + 1].x) * DIVSHORTMAX
// volume[x+1][y+1][z+1], C111
* a * b * c;
}
/*************************************************************
NOTE The two functions below determine, during raycasting, when a ray first enters the
volume and when it leaves the volume again.
One assumption is made here: the camera always looks towards the Volume.
Looking only at the x and y axes, the coordinate frame is laid out like this:
y^
|
|
|---------
| |
| volume |
| |
-------------|------------->x
|
|
First we want the shortest travel time along each axis. With a uniform step length:
tmin per axis = (distance from the current camera position to the volume along that axis)/(component of the raycast direction along that axis)
Similarly, the longest time needed for the ray to pass completely through the volume is, per axis:
tmax per axis = (distance from the current camera position to the far side of the volume along that axis)/(component of the raycast direction along that axis)
To approximate the minimum time the ray needs before raycasting can begin, the program takes
final_min_time = max(txmin, tymin, tzmin)
which guarantees that after final_min_time the ray has (almost certainly) reached the Volume on every axis, so raycasting can start.
To approximate the minimum time after which the ray has walked out of the Volume, it is equally conservative:
final_max_time = min(txmax, tymax, tzmax)
so that after final_max_time the ray has left the volume on at least one axis, i.e. it is outside the volume and raycasting can stop.
// ! This analysis breaks down for some camera positions, e.g. when the pose has 0 < ty < volume.size.y and direction.y > 0 the
resulting tmin is negative and the computation goes wrong.
**************************************************************/
// __forceinline__: force the compiler to inline the function
__device__ __forceinline__
// Minimum number of steps (travel time) the ray needs before it has entered the Volume on every axis
float get_min_time(
const float3& volume_max, // real-world extent of the volume
const Vec3fda& origin, // ray origin, i.e. the current camera centre
const Vec3fda& direction) // ray direction
{
// Entry time per axis; after the maximum of the three the ray is inside the volume on all axes
float txmin = ((direction.x() > 0 ? 0.f : volume_max.x) - origin.x()) / direction.x();
float tymin = ((direction.y() > 0 ? 0.f : volume_max.y) - origin.y()) / direction.y();
float tzmin = ((direction.z() > 0 ? 0.f : volume_max.z) - origin.z()) / direction.z();
return fmax(fmax(txmin, tymin), tzmin);
}
__device__ __forceinline__
// Minimum number of steps (travel time) after which the ray has left the Volume on at least one axis
float get_max_time(const float3& volume_max, const Vec3fda& origin, const Vec3fda& direction)
{
// Exit time per axis; after the minimum of the three the ray has left the volume on one axis
float txmax = ((direction.x() > 0 ? volume_max.x : 0.f) - origin.x()) / direction.x();
float tymax = ((direction.y() > 0 ? volume_max.y : 0.f) - origin.y()) / direction.y();
float tzmax = ((direction.z() > 0 ? volume_max.z : 0.f) - origin.z()) / direction.z();
return fmin(fmin(txmax, tymax), tzmax);
}
__global__
void raycast_tsdf_kernel(
const PtrStepSz<short2> tsdf_volume, // Global TSDF Volume
const PtrStepSz<uchar3> color_volume, // Global Color Volume
PtrStepSz<float3> model_vertex, // predicted vertex map
PtrStepSz<float3> model_normal, // predicted normal map
PtrStepSz<uchar3> model_color, // predicted color map
const int3 volume_size, // Volume size
const float voxel_scale, // Volume voxel scale
const CameraParameters cam_parameters, // camera intrinsics of the current pyramid level
const float truncation_distance, // truncation distance
const Eigen::Matrix<float, 3, 3, Eigen::DontAlign> rotation, // rotation of the camera pose
const Vec3fda translation) // translation of the camera pose
{
// step 0: compute the pixel this thread is responsible for, and check that it is valid
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
// sanity check: is the pixel inside the image of the current pyramid level?
if (x >= model_vertex.cols || y >= model_vertex.rows)
return;
// step 2: compute the raycast ray, and where along it raycasting should start and stop
// metric extent of the Volume
// ! note: this extent does not depend on the thread id at all; it could be computed once offline
// ! instead of being recomputed by every one of the 512*512*512 threads
const float3 volume_range = make_float3(volume_size.x * voxel_scale,
volume_size.y * voxel_scale,
volume_size.z * voxel_scale);
// Ray through the pixel in the current camera frame; only the direction matters later, so there is no multiplication by Z
const Vec3fda pixel_position(
(x - cam_parameters.principal_x) / cam_parameters.focal_x, // X/Z
(y - cam_parameters.principal_y) / cam_parameters.focal_y, // Y/Z
1.f); // Z/Z
// Rotate into the world frame (direction only): P_w = R_{wc} * P_c
Vec3fda ray_direction = (rotation * pixel_position);
ray_direction.normalize();
// fmax: CUDA float max()
// translation is the camera centre expressed in world coordinates
// distance along the ray at which raycasting starts (clamped to be non-negative)
float ray_length = fmax(get_min_time(volume_range, translation, ray_direction), 0.f);
// if the entry distance is already past the exit distance, the ray misses the volume entirely
if (ray_length >= get_max_time(volume_range, translation, ray_direction))
return;
// advance a little so that the first sample lies inside the volume
ray_length += voxel_scale;
Vec3fda grid = (translation + (ray_direction * ray_length)) / voxel_scale;
// TSDF value stored at this grid cell; the loop below searches for its zero crossing
// the volume is stored slice by slice, so the row index is z * volume_size.y + y and the column index is x
// __float2int_rd: round a float down to an integer
float tsdf = static_cast<float>(tsdf_volume.ptr(
__float2int_rd(grid(2)) * volume_size.y + __float2int_rd(grid(1)))[__float2int_rd(grid(0))].x) *
DIVSHORTMAX;
// maximum distance to march along the ray (measured from the current ray_length)
// ! sqrt(2.f) is only the face diagonal; the space diagonal of the cube would be sqrt(3.f)
// ! volume_range is already expressed in metric units (the voxel scale is applied)
const float max_search_length = ray_length + volume_range.x * sqrt(2.f);
// step 3: march along the ray; this is the main raycasting loop
for (; ray_length < max_search_length; ray_length += truncation_distance * 0.5f) {
// step 3.1: fetch the TSDF value at the next sample position
// grid index of the sample half a truncation distance further along the ray
grid = ((translation + (ray_direction * (ray_length + truncation_distance * 0.5f))) / voxel_scale);
// skip samples that fall outside the volume (keeping a one-voxel margin)
if (grid.x() < 1 || grid.x() >= volume_size.x - 1 || grid.y() < 1 ||
grid.y() >= volume_size.y - 1 ||
grid.z() < 1 || grid.z() >= volume_size.z - 1)
continue;
// remember the TSDF value of the previous sample
const float previous_tsdf = tsdf;
// TSDF value at the current grid cell
tsdf = static_cast<float>(tsdf_volume.ptr(
__float2int_rd(grid(2)) * volume_size.y + __float2int_rd(grid(1)))[__float2int_rd(
grid(0))].x) *
DIVSHORTMAX;
// step 3.2: check for a zero crossing
if (previous_tsdf < 0.f && tsdf > 0.f) //Zero crossing from behind
// the surface is crossed from behind (back face), so it is not visible from the camera: stop marching
break;
if (previous_tsdf > 0.f && tsdf < 0.f) { //Zero crossing
// step 3.3: a front-facing zero crossing was found, refine the intersection point
// linear interpolation of the crossing distance (equation (15) of the KinectFusion paper)
const float t_star =
ray_length - truncation_distance * 0.5f * previous_tsdf / (tsdf - previous_tsdf);
// surface vertex in world coordinates (a Vec3f)
const auto vertex = translation + ray_direction * t_star;
// the same point expressed in volume (grid) coordinates
const Vec3fda location_in_grid = (vertex / voxel_scale);
// discard the hit if the vertex does not lie safely inside the Volume
if (location_in_grid.x() < 1 || location_in_grid.x() >= volume_size.x - 1 ||
location_in_grid.y() < 1 || location_in_grid.y() >= volume_size.y - 1 ||
location_in_grid.z() < 1 || location_in_grid.z() >= volume_size.z - 1)
break;
// step 3.4: estimate the surface normal from the TSDF gradient along x, y and z around this grid position
// normal - the gradient / surface normal being built up
// shifted - shifted sample position used for the central differences
Vec3fda normal, shifted;
// step 3.4.1: difference along x
shifted = location_in_grid;
// shift one voxel in +x, making sure we stay inside the volume
shifted.x() += 1;
if (shifted.x() >= volume_size.x - 1)
break;
// Trilinearly interpolated TSDF value at the shifted position.
// shifted is a fractional position inside the Volume, so the value is interpolated
// from the voxel centres surrounding location_in_grid shifted by +1 along x.
const float Fx1 = interpolate_trilinearly(
shifted, // sample position in Volume coordinates, a Vec3fda
tsdf_volume, // TSDF Volume
volume_size, // Volume size
voxel_scale); // voxel scale
// same thing for the neighbour shifted by -1 along x
shifted = location_in_grid;
shifted.x() -= 1;
if (shifted.x() < 1)
break;
const float Fx2 = interpolate_trilinearly(shifted, tsdf_volume, volume_size, voxel_scale);
// the difference of the two interpolated TSDF values gives the x component of the gradient
// ! the 1/(2*voxel_scale) factor is dropped; it is irrelevant because the vector is normalized below (as long as it is not 0)
normal.x() = (Fx1 - Fx2);
// step 3.4.2: difference along y
shifted = location_in_grid;
shifted.y() += 1;
if (shifted.y() >= volume_size.y - 1)
break;
const float Fy1 = interpolate_trilinearly(shifted, tsdf_volume, volume_size, voxel_scale);
shifted = location_in_grid;
shifted.y() -= 1;
if (shifted.y() < 1)
break;
const float Fy2 = interpolate_trilinearly(shifted, tsdf_volume, volume_size, voxel_scale);
normal.y() = (Fy1 - Fy2);
// step 3.4.3: difference along z
shifted = location_in_grid;
shifted.z() += 1;
if (shifted.z() >= volume_size.z - 1)
break;
const float Fz1 = interpolate_trilinearly(shifted, tsdf_volume, volume_size, voxel_scale);
shifted = location_in_grid;
shifted.z() -= 1;
if (shifted.z() < 1)
break;
const float Fz2 = interpolate_trilinearly(shifted, tsdf_volume, volume_size, voxel_scale);
normal.z() = (Fz1 - Fz2);
// step 3.4.4: a zero gradient gives no usable normal, give up on this pixel
if (normal.norm() == 0)
break;
// normalize the gradient to obtain the unit surface normal
normal.normalize();
// step 3.5: write the results
// store the vertex and normal into the predicted maps
model_vertex.ptr(y)[x] = make_float3(vertex.x(), vertex.y(), vertex.z());
model_normal.ptr(y)[x] = make_float3(normal.x(), normal.y(), normal.z());
// step 3.6: look up the surface colour
// integer voxel index into the Color Volume
auto location_in_grid_int = location_in_grid.cast<int>();
// colour stored in the Color Volume at that voxel
model_color.ptr(y)[x] = color_volume.ptr(
location_in_grid_int.z() * volume_size.y +
location_in_grid_int.y())[location_in_grid_int.x()];
break;
}
} // end of the raycasting loop
}
// Host-side wrapper: predicts the surface (vertex, normal and colour maps) by raycasting the TSDF volume
void surface_prediction(
const VolumeData& volume, // Global Volume
GpuMat& model_vertex, // predicted vertex map
GpuMat& model_normal, // predicted normal map
GpuMat& model_color, // predicted color map
const CameraParameters& cam_parameters, // camera intrinsics of the current pyramid level
const float truncation_distance, // truncation distance
const Eigen::Matrix4f& pose) // current camera pose
{
// step 0: clear the output maps
model_vertex.setTo(0);
model_normal.setTo(0);
model_color.setTo(0);
// step 1: choose the CUDA launch configuration
dim3 threads(32, 32);
dim3 blocks((model_vertex.cols + threads.x - 1) / threads.x,
(model_vertex.rows + threads.y - 1) / threads.y);
// step 2: launch the raycast kernel
hipLaunchKernelGGL(( raycast_tsdf_kernel), dim3(blocks), dim3(threads), 0, 0,
volume.tsdf_volume, // Global TSDF Volume
volume.color_volume, // Global Color Volume
model_vertex, // predicted vertex map
model_normal, // predicted normal map
model_color, // predicted color map
volume.volume_size, // Volume size
volume.voxel_scale, // Volume voxel scale
cam_parameters, // camera intrinsics of the current pyramid level
truncation_distance, // truncation distance
pose.block(0, 0, 3, 3), // rotation part of the camera pose
pose.block(0, 3, 3, 1)); // translation part of the camera pose
// step 3: wait for the device to finish before returning
hipDeviceSynchronize();
}
}
}
} | d71838347ae7ccfd8006824f14f727097750e235.cu | // Predicts the surface, i.e. performs raycasting
// This is CUDA code; compile with nvcc
// Author: Christian Diller, [email protected]
#include "include/common.h"
using Vec3ida = Eigen::Matrix<int, 3, 1, Eigen::DontAlign>;
namespace kinectfusion {
namespace internal {
namespace cuda {
__device__ __forceinline__
// Trilinear interpolation
float interpolate_trilinearly(
const Vec3fda& point, // (non-integer) point at which the TSDF value is wanted
const PtrStepSz<short2>& volume, // TSDF Volume object
const int3& volume_size, // size of the TSDF Volume
const float voxel_scale) // metric size of one voxel of the TSDF Volume
{
// Everything in this function is expressed in (fractional) Volume coordinates.
// Convert the query point to integer voxel indices.
Vec3ida point_in_grid = point.cast<int>();
// Coordinates of the corresponding voxel centre.
const float vx = (static_cast<float>(point_in_grid.x()) + 0.5f);
const float vy = (static_cast<float>(point_in_grid.y()) + 0.5f);
const float vz = (static_cast<float>(point_in_grid.z()) + 0.5f);
// If the query point lies on the smaller side of its voxel centre along an axis, step the
// index back by one on that axis; otherwise keep it unchanged. (The original source carries
// an x-axis sketch of the two cases here.) Splitting the cases this way guarantees that the
// eight voxel centres used below always enclose the query point.
point_in_grid.x() = (point.x() < vx) ? (point_in_grid.x() - 1) : point_in_grid.x();
point_in_grid.y() = (point.y() < vy) ? (point_in_grid.y() - 1) : point_in_grid.y();
point_in_grid.z() = (point.z() < vz) ? (point_in_grid.z() - 1) : point_in_grid.z();
// The +0.5f accounts for the fact that the TSDF value stored at point_in_grid is the value at the voxel centre.
// Trilinear interpolation, ref: https://en.wikipedia.org/wiki/Trilinear_interpolation
// Offsets between the exact (floating-point) point and the enclosing voxel centre.
const float a = (point.x() - (static_cast<float>(point_in_grid.x()) + 0.5f));
const float b = (point.y() - (static_cast<float>(point_in_grid.y()) + 0.5f));
const float c = (point.z() - (static_cast<float>(point_in_grid.z()) + 0.5f));
return
static_cast<float>(volume.ptr((point_in_grid.z()) * volume_size.y + point_in_grid.y())[point_in_grid.x()].x) * DIVSHORTMAX
// volume[ x ][ y ][ z ], C000
* (1 - a) * (1 - b) * (1 - c) +
static_cast<float>(volume.ptr((point_in_grid.z() + 1) * volume_size.y + point_in_grid.y())[point_in_grid.x()].x) * DIVSHORTMAX
// volume[ x ][ y ][z+1], C001
* (1 - a) * (1 - b) * c +
static_cast<float>(volume.ptr((point_in_grid.z()) * volume_size.y + point_in_grid.y() + 1)[point_in_grid.x()].x) * DIVSHORTMAX
// volume[ x ][y+1][ z ], C010
* (1 - a) * b * (1 - c) +
static_cast<float>(volume.ptr((point_in_grid.z() + 1) * volume_size.y + point_in_grid.y() + 1)[point_in_grid.x()].x) * DIVSHORTMAX
// volume[ x ][y+1][z+1], C011
* (1 - a) * b * c +
static_cast<float>(volume.ptr((point_in_grid.z()) * volume_size.y + point_in_grid.y())[point_in_grid.x() + 1].x) * DIVSHORTMAX
// volume[x+1][ y ][ z ], C100
* a * (1 - b) * (1 - c) +
static_cast<float>(volume.ptr((point_in_grid.z() + 1) * volume_size.y + point_in_grid.y())[point_in_grid.x() + 1].x) * DIVSHORTMAX
// volume[x+1][ y ][z+1], C101
* a * (1 - b) * c +
static_cast<float>(volume.ptr((point_in_grid.z()) * volume_size.y + point_in_grid.y() + 1)[point_in_grid.x() + 1].x) * DIVSHORTMAX
// volume[x+1][y+1][ z ], C110
* a * b * (1 - c) +
static_cast<float>(volume.ptr((point_in_grid.z() + 1) * volume_size.y + point_in_grid.y() + 1)[point_in_grid.x() + 1].x) * DIVSHORTMAX
// volume[x+1][y+1][z+1], C111
* a * b * c;
}
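// Illustrative sketch (not used by any code in this file): with the fractional offsets a, b, c
// computed above, the weighted sum returned by interpolate_trilinearly is exactly
//   f = C000*(1-a)(1-b)(1-c) + C001*(1-a)(1-b)c + C010*(1-a)b(1-c) + C011*(1-a)bc
//     + C100*a(1-b)(1-c)     + C101*a(1-b)c     + C110*ab(1-c)     + C111*abc
// which can also be written as three nested 1-D interpolations. A hypothetical helper (the name
// lerp1 and its use are assumptions, nothing here calls it) would be:
//   __device__ float lerp1(float v0, float v1, float t) { return v0 + t * (v1 - v0); }
//   // f = lerp1(lerp1(lerp1(C000,C100,a), lerp1(C010,C110,a), b),
//   //           lerp1(lerp1(C001,C101,a), lerp1(C011,C111,a), b), c)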
/*************************************************************
NOTE The two functions below determine, during raycasting, when the ray first enters the
volume and when it leaves it again.
They rest on one assumption: the camera is always looking towards the volume.
Looking at the x and y axes only, the volume is an axis-aligned box with one corner at the
origin of the world frame (the original sketch drew the box in the first quadrant).
First we want the minimum time the ray needs, per axis, to reach the volume. With a uniform
step length:
    time units on an axis: tmin = (distance from the camera to the volume along that axis)
                                  / (component of the ray direction on that axis)
Similarly, the maximum time for the ray to pass completely through the volume is, per axis:
    time units on an axis: tmax = (distance from the camera to the far side of the volume
                                   along that axis)
                                  / (component of the ray direction on that axis)
To approximate the minimum time after which raycasting can start, the code uses
    final_min_time = max(txmin, tymin, tzmin)
so that after final_min_time the ray has (almost certainly) reached the volume on every axis
and raycasting can begin.
Likewise, as a conservative estimate of when the ray leaves the volume:
    final_max_time = min(txmax, tymax, tzmax)
which guarantees that after final_max_time the ray has left the volume on at least one axis,
so raycasting can stop.
// ! The analysis above can break down for some camera poses, e.g. when the pose has
// ! 0 < ty < volume.size.y and direction.y > 0: the resulting tmin is negative and the
// ! computation goes wrong.
**************************************************************/
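// Worked example (illustrative numbers, not taken from the code): a 512^3-voxel volume with
// voxel_scale = 2 gives volume_max = (1024, 1024, 1024). For a camera at
// origin = (-512, -512, -512) looking along direction = (0.577, 0.577, 0.577):
//   txmin = tymin = tzmin = (0 - (-512)) / 0.577    ~  887   ->  get_min_time ~  887
//   txmax = tymax = tzmax = (1024 - (-512)) / 0.577 ~ 2662   ->  get_max_time ~ 2662
// so the ray would be marched from length ~887 to ~2662 (in the same units as the volume).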
// __forceinline__: force the function to be inlined
__device__ __forceinline__
// Minimum number of steps (equivalently, time) the ray needs before it has entered the volume
float get_min_time(
const float3& volume_max, // extent of the volume (in real-world units)
const Vec3fda& origin, // starting point, i.e. the current camera position
const Vec3fda& direction) // ray direction
{
// Compute the entry time per axis and return the maximum; after that many steps the ray has entered the volume along all three axes
float txmin = ((direction.x() > 0 ? 0.f : volume_max.x) - origin.x()) / direction.x();
float tymin = ((direction.y() > 0 ? 0.f : volume_max.y) - origin.y()) / direction.y();
float tzmin = ((direction.z() > 0 ? 0.f : volume_max.z) - origin.z()) / direction.z();
return fmax(fmax(txmin, tymin), tzmin);
}
__device__ __forceinline__
// Minimum number of steps (equivalently, time) the ray needs before it has left the volume
float get_max_time(const float3& volume_max, const Vec3fda& origin, const Vec3fda& direction)
{
// Compute the exit time per axis and return the minimum; after that many steps the ray has left the volume along at least one axis
float txmax = ((direction.x() > 0 ? volume_max.x : 0.f) - origin.x()) / direction.x();
float tymax = ((direction.y() > 0 ? volume_max.y : 0.f) - origin.y()) / direction.y();
float tzmax = ((direction.z() > 0 ? volume_max.z : 0.f) - origin.z()) / direction.z();
return fmin(fmin(txmax, tymax), tzmax);
}
__global__
void raycast_tsdf_kernel(
const PtrStepSz<short2> tsdf_volume, // Global TSDF Volume
const PtrStepSz<uchar3> color_volume, // Global Color Volume
PtrStepSz<float3> model_vertex, // predicted vertex map
PtrStepSz<float3> model_normal, // predicted normal map
PtrStepSz<uchar3> model_color, // predicted color map
const int3 volume_size, // volume size
const float voxel_scale, // volume scale factor
const CameraParameters cam_parameters, // camera intrinsics of the current pyramid level
const float truncation_distance, // truncation distance
const Eigen::Matrix<float, 3, 3, Eigen::DontAlign> rotation, // rotation part of the camera pose
const Vec3fda translation) // translation part of the camera pose
{
// step 0: find the image pixel this thread handles and check that it is valid
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
// validity check: is the pixel inside the image of the current pyramid level?
if (x >= model_vertex.cols || y >= model_vertex.rows)
return;
// step 2: build the raycast ray and decide where it should start and end
// compute the spatial extent of the volume
// ! note: this extent does not depend on the thread id, so it could be computed offline; having every one of the 512*512*512 threads recompute it wastes compute
const float3 volume_range = make_float3(volume_size.x * voxel_scale,
volume_size.y * voxel_scale,
volume_size.z * voxel_scale);
// line from the camera centre through the current pixel, expressed in the current camera frame; only the direction matters later, so there is no multiplication by Z
const Vec3fda pixel_position(
(x - cam_parameters.principal_x) / cam_parameters.focal_x, // X/Z
(y - cam_parameters.principal_y) / cam_parameters.focal_y, // Y/Z
1.f); // Z/Z
// direction of this back-projected ray expressed in the world frame; recall P_w = R_{wc} * P_c
Vec3fda ray_direction = (rotation * pixel_position);
ray_direction.normalize();
// fmax: the float version of max() in CUDA
// the translation parameter is the camera centre expressed in the world frame
// starting position of the raycast
float ray_length = fmax(get_min_time(volume_range, translation, ray_direction), 0.f);
// validity check: the ray length at the start must be smaller than the one at the end
if (ray_length >= get_max_time(volume_range, translation, ray_direction))
return;
// advance one more voxel from the start to make sure the position has actually entered the volume
ray_length += voxel_scale;
Vec3fda grid = (translation + (ray_direction * ray_length)) / voxel_scale;
// fetch the TSDF value of the voxel at grid; it acts as the previous TSDF sample of this ray
// if this coordinate is not inside the volume, the tsdf value is undefined and may even trigger a segfault
// __float2int_rd: round down (towards negative infinity)
float tsdf = static_cast<float>(tsdf_volume.ptr(
__float2int_rd(grid(2)) * volume_size.y + __float2int_rd(grid(1)))[__float2int_rd(grid(0))].x) *
DIVSHORTMAX;
// maximum search length (taking into account the distance ray_length already travelled before the "cast" starts)
// ! unclear why this uses sqrt(2) rather than sqrt(3)
// ! not multiplying by the scale here also looks questionable
const float max_search_length = ray_length + volume_range.x * sqrt(2.f);
// step 3: iterative search, the actual raycasting; the step size is half the truncation distance
for (; ray_length < max_search_length; ray_length += truncation_distance * 0.5f) {
// step 3.1: fetch the TSDF at the current ray position
// voxel index reached by the ray after this step
grid = ((translation + (ray_direction * (ray_length + truncation_distance * 0.5f))) / voxel_scale);
// validity check
if (grid.x() < 1 || grid.x() >= volume_size.x - 1 || grid.y() < 1 ||
grid.y() >= volume_size.y - 1 ||
grid.z() < 1 || grid.z() >= volume_size.z - 1)
continue;
// keep the previous TSDF value for the zero-crossing test below
const float previous_tsdf = tsdf;
// TSDF value at the current grid position
tsdf = static_cast<float>(tsdf_volume.ptr(
__float2int_rd(grid(2)) * volume_size.y + __float2int_rd(grid(1)))[__float2int_rd(
grid(0))].x) *
DIVSHORTMAX;
// step 3.2: check whether the ray crossed the surface
if (previous_tsdf < 0.f && tsdf > 0.f) //Zero crossing from behind
// the ray exits the surface from behind, so stop
break;
if (previous_tsdf > 0.f && tsdf < 0.f) { //Zero crossing
// step 3.3: the ray did cross the surface here; compute the intersection of the ray with that surface
// refine the surface position (expressed as a ray length); the formula matches Eq. (15) of the paper
const float t_star =
ray_length - truncation_distance * 0.5f * previous_tsdf / (tsdf - previous_tsdf);
// intersection of the ray with the surface, called the surface vertex below; Vec3f
const auto vertex = translation + ray_direction * t_star;
// position of the surface vertex inside the volume
const Vec3fda location_in_grid = (vertex / voxel_scale);
// validity check: if the vertex turns out not to lie inside our volume, ignore it
if (location_in_grid.x() < 1 || location_in_grid.x() >= volume_size.x - 1 ||
location_in_grid.y() < 1 || location_in_grid.y() >= volume_size.y - 1 ||
location_in_grid.z() < 1 || location_in_grid.z() >= volume_size.z - 1)
break;
// step 3.4: estimate the surface normal at this grid point, axis by axis (x, y, z)
// normal - the surface normal
// shifted - temporary used to shift the sample position
Vec3fda normal, shifted;
// step 3.4.1: x axis
shifted = location_in_grid;
// shift from the vertex's voxel position; if the shift leaves the volume, give up
shifted.x() += 1;
if (shifted.x() >= volume_size.x - 1)
break;
// This yields a TSDF value.
// We do not simply read the TSDF of the voxel containing shifted: the volume only stores distances at voxel centres,
// while location_in_grid+1 (i.e. shifted) is a floating point position, so trilinear interpolation is needed for a reasonably accurate value.
const float Fx1 = interpolate_trilinearly(
shifted, // vertex position in volume coordinates after the shift, Vec3fda
tsdf_volume, // TSDF volume
volume_size, // volume size
voxel_scale); // scale information
// same operation, but shifting in the opposite direction
shifted = location_in_grid;
shifted.x() -= 1;
if (shifted.x() < 1)
break;
const float Fx2 = interpolate_trilinearly(shifted, tsdf_volume, volume_size, voxel_scale);
// The TSDF value encodes the distance from the voxel centre to the surface, so this difference can stand in for the gradient
// ! this is only accurate if none of the surrounding distances are truncated; otherwise the normal component along an axis may come out as 0
normal.x() = (Fx1 - Fx2);
// step 3.4.2: y axis
shifted = location_in_grid;
shifted.y() += 1;
if (shifted.y() >= volume_size.y - 1)
break;
const float Fy1 = interpolate_trilinearly(shifted, tsdf_volume, volume_size, voxel_scale);
shifted = location_in_grid;
shifted.y() -= 1;
if (shifted.y() < 1)
break;
const float Fy2 = interpolate_trilinearly(shifted, tsdf_volume, volume_size, voxel_scale);
normal.y() = (Fy1 - Fy2);
// step 3.4.3: z axis
shifted = location_in_grid;
shifted.z() += 1;
if (shifted.z() >= volume_size.z - 1)
break;
const float Fz1 = interpolate_trilinearly(shifted, tsdf_volume, volume_size, voxel_scale);
shifted = location_in_grid;
shifted.z() -= 1;
if (shifted.z() < 1)
break;
const float Fz2 = interpolate_trilinearly(shifted, tsdf_volume, volume_size, voxel_scale);
normal.z() = (Fz1 - Fz2);
// step 3.4.4: check that the normal is valid and, if so, normalize it
if (normal.norm() == 0)
break;
// the normal is valid, so normalize it first
normal.normalize();
// step 3.5: store the surface vertex and surface normal
// write the results into the vertex map and the normal map
model_vertex.ptr(y)[x] = make_float3(vertex.x(), vertex.y(), vertex.z());
model_normal.ptr(y)[x] = make_float3(normal.x(), normal.y(), normal.z());
// step 3.6: fetch the color at this point
// convert the vertex's floating point volume position into integer indices
auto location_in_grid_int = location_in_grid.cast<int>();
// use the integer indices to read the color stored in the color volume and write it to the color map
model_color.ptr(y)[x] = color_volume.ptr(
location_in_grid_int.z() * volume_size.y +
location_in_grid_int.y())[location_in_grid_int.x()];
break;
}
} // raycasting
}
// Run surface prediction on the given pyramid level of the current frame
void surface_prediction(
const VolumeData& volume, // Global Volume
GpuMat& model_vertex, // predicted vertex map
GpuMat& model_normal, // predicted normal map
GpuMat& model_color, // predicted color map
const CameraParameters& cam_parameters, // camera intrinsics of the current pyramid level
const float truncation_distance, // truncation distance
const Eigen::Matrix4f& pose) // camera pose of the current frame
{
// step 0: data preparation: clear the vertex map, normal map and color map
model_vertex.setTo(0);
model_normal.setTo(0);
model_color.setTo(0);
// step 1: compute the thread layout; it depends on the image size of the current pyramid level
dim3 threads(32, 32);
dim3 blocks((model_vertex.cols + threads.x - 1) / threads.x,
(model_vertex.rows + threads.y - 1) / threads.y);
// step 2: launch the kernel to do the computation in parallel
raycast_tsdf_kernel<<<blocks, threads>>>(
volume.tsdf_volume, // Global TSDF Volume
volume.color_volume, // Global Color Volume
model_vertex, // predicted vertex map
model_normal, // predicted normal map
model_color, // predicted color map
volume.volume_size, // volume size
volume.voxel_scale, // volume scale factor
cam_parameters, // camera intrinsics of the current pyramid level
truncation_distance, // truncation distance
pose.block(0, 0, 3, 3), // rotation extracted from the camera pose
pose.block(0, 3, 3, 1)); // translation extracted from the camera pose
// step 3: wait for the device to finish, then return
cudaThreadSynchronize();
}
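// Usage sketch (hypothetical caller; the pyramid container names are assumptions, not part of this file):
//   for (int level = 0; level < num_levels; ++level)
//       surface_prediction(volume,
//                          model_data.vertex_pyramid[level],
//                          model_data.normal_pyramid[level],
//                          model_data.color_pyramid[level],
//                          camera_parameters.level(level),
//                          truncation_distance,
//                          current_pose);
// i.e. the raycast is typically repeated once per pyramid level so the next frame's pose
// estimation can match against the predicted vertex and normal maps.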
}
}
} |
b5626042d4a8994d5be2c14dc5cfec6e160714c9.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "mul_.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *input = NULL;
hipMalloc(&input, XSIZE*YSIZE*sizeof(float)); // allocate room for XSIZE*YSIZE floats, not bytes
float factor = 1;
int size = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
mul_), dim3(gridBlock),dim3(threadBlock), 0, 0, input,factor,size);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
mul_), dim3(gridBlock),dim3(threadBlock), 0, 0, input,factor,size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
mul_), dim3(gridBlock),dim3(threadBlock), 0, 0, input,factor,size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | b5626042d4a8994d5be2c14dc5cfec6e160714c9.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "mul_.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
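// The mul_ kernel is pulled in from mul_.cu, which is not shown here. A minimal sketch that is
// consistent with how it is launched below -- an assumption about that file, not its contents --
// would be:
//   __global__ void mul_(float *input, float factor, int size) {
//       int i = blockIdx.x * blockDim.x + threadIdx.x;
//       if (i < size) input[i] *= factor;   // scale each element in place
//   }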
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *input = NULL;
cudaMalloc(&input, XSIZE*YSIZE*sizeof(float)); // allocate room for XSIZE*YSIZE floats, not bytes
float factor = 1;
int size = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
mul_<<<gridBlock,threadBlock>>>(input,factor,size);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
mul_<<<gridBlock,threadBlock>>>(input,factor,size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
mul_<<<gridBlock,threadBlock>>>(input,factor,size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
0e2d0f59519fdb3653e2eb826f3772594ae08859.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "query_rmse.cuh"
#include <catboost/cuda/cuda_lib/kernel/kernel.cuh>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
#include <catboost/cuda/cuda_util/kernel/fill.cuh>
namespace NKernel {
template <int BLOCK_SIZE>
__global__ void ComputeGroupMaximalsImpl(const float* target, const float* weights,
const float* approxExp,
const ui32* qOffsets, int offsetsBias,
const ui32* qSizes, int qCount,
float* maximals, float* sumWeightedTargets)
{
const int queriesPerBlock = BLOCK_SIZE / 32;
const int localQid = threadIdx.x / 32;
const int qid = blockIdx.x * queriesPerBlock + localQid;
__shared__ volatile float line[BLOCK_SIZE];
__shared__ float resultMaxApprox[queriesPerBlock];
__shared__ float resultSumWeightedTarget[queriesPerBlock];
ui32 readOffset = qid < qCount ? (qOffsets[qid] - offsetsBias) : 0;
weights += (weights != nullptr) ? readOffset : 0;
target += readOffset;
approxExp += readOffset;
maximals += blockIdx.x * queriesPerBlock;
sumWeightedTargets += blockIdx.x * queriesPerBlock;
line[threadIdx.x] = 0;
const int x = threadIdx.x & 31;
const int querySize = qid < qCount ? qSizes[qid] : 0;
float maxApprox = -FLT_MAX;
float sumWeightedTarget = 0;
for (int i = x; i < querySize; i += 32) {
const float t = __ldg(target + i);
const float w = weights != nullptr ? __ldg(weights + i) : 1.0f;
const float a = __ldg(approxExp + i);
maxApprox = (w > 0) ? max(maxApprox, a) : maxApprox;
sumWeightedTarget += t * w;
}
line[threadIdx.x] = maxApprox;
const float totalMaxApprox = WarpReduce(x, line + localQid * 32, 32, TCudaMax<float>());
line[threadIdx.x] = sumWeightedTarget;
const float totalSumWeightedTarget = WarpReduce(x, line + localQid * 32, 32);
if (x == 0 && (qid < qCount)) {
maximals[localQid] = totalMaxApprox;
sumWeightedTargets[localQid] = totalSumWeightedTarget;
}
}
void ComputeGroupMaximals(const float* target, const float* weights,
const float* approxExp,
const ui32* qOffsets, ui32 qOffsetsBias,
const ui32* qSizes, ui32 qCount,
float* maximals, float* sumWeightedTargets,
TCudaStream stream) {
const int blockSize = 128;
const int numBlocks = (qCount * 32 + 127) / blockSize;
if (numBlocks > 0) {
hipLaunchKernelGGL(( ComputeGroupMaximalsImpl<blockSize>) , dim3(numBlocks), dim3(blockSize), 0, stream , target, weights, approxExp, qOffsets, qOffsetsBias, qSizes, qCount, maximals, sumWeightedTargets);
}
}
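// Layout note (a reading of the kernel above, for reference): each query is handled by one warp,
// so a block of BLOCK_SIZE threads covers BLOCK_SIZE / 32 queries. Every lane strides over the
// query's documents in steps of 32, keeps a private running maximum and weighted-target sum, and
// WarpReduce then combines the 32 lane values (TCudaMax for the maximum, addition for the sum)
// so that lane 0 can write the per-query result.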
template <int BLOCK_SIZE>
__global__ void ComputeQueryExponentsImpl(const float* weights,
const ui32* qids, ui32 size,
const float* maximals,
const ui32* writeMap,
float* approxExp,
float beta) {
const ui32 i = blockIdx.x * blockDim.x + threadIdx.x;
const float weight = (weights && (i < size)) ? weights[i] : 1.0f;
const float approx = i < size ? approxExp[i] : 0;
const float apprMax = i < size ? __ldg(maximals + __ldg(qids + i)) : 0;
const float apprExp = __expf(beta * (approx - apprMax)) * weight;
if (i < size) {
approxExp[i] = apprExp;
}
}
void ComputeQueryExponents(const float* weights,
const ui32* qids, ui32 size,
const float* maximals,
const ui32* writeMap,
float* approxExp,
float beta,
TCudaStream stream) {
const ui32 blockSize = 1024;
const ui32 numBlocks = (size + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( ComputeQueryExponentsImpl<blockSize>), dim3(numBlocks), dim3(blockSize), 0, stream, weights, qids, size, maximals, writeMap, approxExp, beta);
}
template <int BLOCK_SIZE>
__global__ void ComputeGroupSumsImpl(const float* data,
const ui32* qOffsets, int offsetsBias,
const ui32* qSizes, int qCount,
float* groupSums)
{
const int queriesPerBlock = BLOCK_SIZE / 32;
const int localQid = threadIdx.x / 32;
const int qid = blockIdx.x * queriesPerBlock + localQid;
__shared__ volatile float line[BLOCK_SIZE];
__shared__ float result[queriesPerBlock];
ui32 readOffset = qid < qCount ? (qOffsets[qid] - offsetsBias) : 0;
data += readOffset;
groupSums += blockIdx.x * queriesPerBlock;
line[threadIdx.x] = 0;
const int x = threadIdx.x & 31;
const int querySize = qid < qCount ? qSizes[qid] : 0;
float sumData = 0;
for (int i = x; i < querySize; i += 32) {
const float a = __ldg(data + i);
sumData += a;
}
line[threadIdx.x] = sumData;
const float totalSumData = WarpReduce(x, line + localQid * 32, 32);
if (x == 0) {
result[localQid] = totalSumData;
}
__syncthreads();
if (x == 0 && (qid < qCount)) {
groupSums[localQid] = result[localQid];
}
}
void ComputeGroupSums(const float* approxExp,
const ui32* qOffsets, ui32 qOffsetsBias,
const ui32* qSizes, ui32 qCount,
float* approxExpSum, TCudaStream stream) {
const int blockSize = 128;
const int numBlocks = (qCount * 32 + 127) / blockSize;
if (numBlocks > 0) {
hipLaunchKernelGGL(( ComputeGroupSumsImpl<blockSize>), dim3(numBlocks), dim3(blockSize), 0, stream, approxExp, qOffsets, qOffsetsBias, qSizes, qCount, approxExpSum);
}
}
template <int BLOCK_SIZE>
__global__ void QuerySoftMaxImpl(const float* target, const float* weights,
const float* approxExp,
const ui32* qids,
float lambdaReg, float beta, ui32 size,
const float* approxExpSum,
const float* sumWeightedTargets,
const ui32* writeMap,
float* functionValue,
float* der,
float* der2) {
const ui32 i = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ float tmpScores[BLOCK_SIZE];
const float targetVal = i < size ? target[i] : 0;
const float weight = (weights && (i < size)) ? weights[i] : 1.0f;
const float approx = i < size ? approxExp[i] : 0;
const ui32 qid = i < size ? __ldg(qids + i) : 0;
const float approxSum = i < size ? __ldg(approxExpSum + qid) : 0;
const float sumTargets = i < size ? __ldg(sumWeightedTargets + qid) : 0;
const float softmax = approx / approxSum;
const float wt = weight * targetVal;
if (i < size) {
const ui32 dstIdx = writeMap != nullptr ? writeMap[i] : i;
if (der) {
der[dstIdx] = beta * (((weight > 0 && sumTargets > 0) ? (-sumTargets * softmax) : 0) + wt);
}
if (der2) {
der2[dstIdx] = (weight > 0 && sumTargets > 0) ? beta * sumTargets * (beta * softmax * (1 - softmax) + lambdaReg) : 0;
}
}
if (functionValue) {
tmpScores[threadIdx.x] = (i < size && weight > 0 && targetVal > 0) ? wt * __logf(softmax) : 0;
__syncthreads();
}
if (functionValue) {
const float val = FastInBlockReduce<float>(threadIdx.x, tmpScores, BLOCK_SIZE);
if (threadIdx.x == 0) {
atomicAdd(functionValue, val);
}
}
}
void ApproximateQuerySoftMax(const float* target, const float* weights,
const float* approxExp,
const ui32* qids,
float lambdaReg, float beta, ui32 size,
const float* approxExpSum,
const float* sumWeightedTargets,
const ui32* writeMap,
float* functionValue,
float* der,
float* der2,
TCudaStream stream) {
const ui32 blockSize = 1024;
const ui32 numBlocks = (size + blockSize - 1) / blockSize;
//TODO: get rid of this
if (functionValue) {
FillBuffer(functionValue, 0.0f, 1, stream);
}
hipLaunchKernelGGL(( QuerySoftMaxImpl<blockSize>), dim3(numBlocks), dim3(blockSize), 0, stream, target, weights, approxExp, qids, lambdaReg, beta, size, approxExpSum, sumWeightedTargets, writeMap, functionValue, der, der2);
}
}
| 0e2d0f59519fdb3653e2eb826f3772594ae08859.cu | #include "query_rmse.cuh"
#include <catboost/cuda/cuda_lib/kernel/kernel.cuh>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
#include <catboost/cuda/cuda_util/kernel/fill.cuh>
namespace NKernel {
template <int BLOCK_SIZE>
__global__ void ComputeGroupMaximalsImpl(const float* target, const float* weights,
const float* approxExp,
const ui32* qOffsets, int offsetsBias,
const ui32* qSizes, int qCount,
float* maximals, float* sumWeightedTargets)
{
const int queriesPerBlock = BLOCK_SIZE / 32;
const int localQid = threadIdx.x / 32;
const int qid = blockIdx.x * queriesPerBlock + localQid;
__shared__ volatile float line[BLOCK_SIZE];
__shared__ float resultMaxApprox[queriesPerBlock];
__shared__ float resultSumWeightedTarget[queriesPerBlock];
ui32 readOffset = qid < qCount ? (qOffsets[qid] - offsetsBias) : 0;
weights += (weights != nullptr) ? readOffset : 0;
target += readOffset;
approxExp += readOffset;
maximals += blockIdx.x * queriesPerBlock;
sumWeightedTargets += blockIdx.x * queriesPerBlock;
line[threadIdx.x] = 0;
const int x = threadIdx.x & 31;
const int querySize = qid < qCount ? qSizes[qid] : 0;
float maxApprox = -FLT_MAX;
float sumWeightedTarget = 0;
for (int i = x; i < querySize; i += 32) {
const float t = __ldg(target + i);
const float w = weights != nullptr ? __ldg(weights + i) : 1.0f;
const float a = __ldg(approxExp + i);
maxApprox = (w > 0) ? max(maxApprox, a) : maxApprox;
sumWeightedTarget += t * w;
}
line[threadIdx.x] = maxApprox;
const float totalMaxApprox = WarpReduce(x, line + localQid * 32, 32, TCudaMax<float>());
line[threadIdx.x] = sumWeightedTarget;
const float totalSumWeightedTarget = WarpReduce(x, line + localQid * 32, 32);
if (x == 0 && (qid < qCount)) {
maximals[localQid] = totalMaxApprox;
sumWeightedTargets[localQid] = totalSumWeightedTarget;
}
}
void ComputeGroupMaximals(const float* target, const float* weights,
const float* approxExp,
const ui32* qOffsets, ui32 qOffsetsBias,
const ui32* qSizes, ui32 qCount,
float* maximals, float* sumWeightedTargets,
TCudaStream stream) {
const int blockSize = 128;
const int numBlocks = (qCount * 32 + 127) / blockSize;
if (numBlocks > 0) {
ComputeGroupMaximalsImpl<blockSize> <<< numBlocks, blockSize, 0, stream >>> (target, weights, approxExp, qOffsets, qOffsetsBias, qSizes, qCount, maximals, sumWeightedTargets);
}
}
template <int BLOCK_SIZE>
__global__ void ComputeQueryExponentsImpl(const float* weights,
const ui32* qids, ui32 size,
const float* maximals,
const ui32* writeMap,
float* approxExp,
float beta) {
const ui32 i = blockIdx.x * blockDim.x + threadIdx.x;
const float weight = (weights && (i < size)) ? weights[i] : 1.0f;
const float approx = i < size ? approxExp[i] : 0;
const float apprMax = i < size ? __ldg(maximals + __ldg(qids + i)) : 0;
const float apprExp = __expf(beta * (approx - apprMax)) * weight;
if (i < size) {
approxExp[i] = apprExp;
}
}
void ComputeQueryExponents(const float* weights,
const ui32* qids, ui32 size,
const float* maximals,
const ui32* writeMap,
float* approxExp,
float beta,
TCudaStream stream) {
const ui32 blockSize = 1024;
const ui32 numBlocks = (size + blockSize - 1) / blockSize;
ComputeQueryExponentsImpl<blockSize><<<numBlocks, blockSize, 0, stream>>>(weights, qids, size, maximals, writeMap, approxExp, beta);
}
template <int BLOCK_SIZE>
__global__ void ComputeGroupSumsImpl(const float* data,
const ui32* qOffsets, int offsetsBias,
const ui32* qSizes, int qCount,
float* groupSums)
{
const int queriesPerBlock = BLOCK_SIZE / 32;
const int localQid = threadIdx.x / 32;
const int qid = blockIdx.x * queriesPerBlock + localQid;
__shared__ volatile float line[BLOCK_SIZE];
__shared__ float result[queriesPerBlock];
ui32 readOffset = qid < qCount ? (qOffsets[qid] - offsetsBias) : 0;
data += readOffset;
groupSums += blockIdx.x * queriesPerBlock;
line[threadIdx.x] = 0;
const int x = threadIdx.x & 31;
const int querySize = qid < qCount ? qSizes[qid] : 0;
float sumData = 0;
for (int i = x; i < querySize; i += 32) {
const float a = __ldg(data + i);
sumData += a;
}
line[threadIdx.x] = sumData;
const float totalSumData = WarpReduce(x, line + localQid * 32, 32);
if (x == 0) {
result[localQid] = totalSumData;
}
__syncthreads();
if (x == 0 && (qid < qCount)) {
groupSums[localQid] = result[localQid];
}
}
void ComputeGroupSums(const float* approxExp,
const ui32* qOffsets, ui32 qOffsetsBias,
const ui32* qSizes, ui32 qCount,
float* approxExpSum, TCudaStream stream) {
const int blockSize = 128;
const int numBlocks = (qCount * 32 + 127) / blockSize;
if (numBlocks > 0) {
ComputeGroupSumsImpl<blockSize><<<numBlocks, blockSize, 0, stream>>>(approxExp, qOffsets, qOffsetsBias, qSizes, qCount, approxExpSum);
}
}
template <int BLOCK_SIZE>
__global__ void QuerySoftMaxImpl(const float* target, const float* weights,
const float* approxExp,
const ui32* qids,
float lambdaReg, float beta, ui32 size,
const float* approxExpSum,
const float* sumWeightedTargets,
const ui32* writeMap,
float* functionValue,
float* der,
float* der2) {
const ui32 i = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ float tmpScores[BLOCK_SIZE];
const float targetVal = i < size ? target[i] : 0;
const float weight = (weights && (i < size)) ? weights[i] : 1.0f;
const float approx = i < size ? approxExp[i] : 0;
const ui32 qid = i < size ? __ldg(qids + i) : 0;
const float approxSum = i < size ? __ldg(approxExpSum + qid) : 0;
const float sumTargets = i < size ? __ldg(sumWeightedTargets + qid) : 0;
const float softmax = approx / approxSum;
const float wt = weight * targetVal;
if (i < size) {
const ui32 dstIdx = writeMap != nullptr ? writeMap[i] : i;
if (der) {
der[dstIdx] = beta * (((weight > 0 && sumTargets > 0) ? (-sumTargets * softmax) : 0) + wt);
}
if (der2) {
der2[dstIdx] = (weight > 0 && sumTargets > 0) ? beta * sumTargets * (beta * softmax * (1 - softmax) + lambdaReg) : 0;
}
}
if (functionValue) {
tmpScores[threadIdx.x] = (i < size && weight > 0 && targetVal > 0) ? wt * __logf(softmax) : 0;
__syncthreads();
}
if (functionValue) {
const float val = FastInBlockReduce<float>(threadIdx.x, tmpScores, BLOCK_SIZE);
if (threadIdx.x == 0) {
atomicAdd(functionValue, val);
}
}
}
void ApproximateQuerySoftMax(const float* target, const float* weights,
const float* approxExp,
const ui32* qids,
float lambdaReg, float beta, ui32 size,
const float* approxExpSum,
const float* sumWeightedTargets,
const ui32* writeMap,
float* functionValue,
float* der,
float* der2,
TCudaStream stream) {
const ui32 blockSize = 1024;
const ui32 numBlocks = (size + blockSize - 1) / blockSize;
//TODO: get rid of this
if (functionValue) {
FillBuffer(functionValue, 0.0f, 1, stream);
}
QuerySoftMaxImpl<blockSize><<<numBlocks, blockSize, 0, stream>>>(target, weights, approxExp, qids, lambdaReg, beta, size, approxExpSum, sumWeightedTargets, writeMap, functionValue, der, der2);
}
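// Math recap (a sketch of what the kernels in this file compute, written out for reference):
// for documents i, j belonging to the same query (qid), with weights w, targets t and approx a,
//   s_i    = w_i * exp(beta * (a_i - max_j a_j)) / sum_j w_j * exp(beta * (a_j - max_j a_j))
//   der_i  = beta * (w_i * t_i - (sum_j w_j * t_j) * s_i)
//   der2_i = beta * (sum_j w_j * t_j) * (beta * s_i * (1 - s_i) + lambdaReg)
// and functionValue accumulates sum_i w_i * t_i * log(s_i).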
}
|
2c5bfa8afcf831fce9ed192c712e1ee1ed969848.hip | // !!! This is a file automatically generated by hipify!!!
/*
* This sample implements a separable convolution
* of a 2D image with an arbitrary filter.
*/
#include <stdio.h>
#include <stdlib.h>
// #include <hip/hip_runtime.h>
// #include <hip/hip_runtime_api.h>
#include <time.h>
#include "gputimer.h"
unsigned int filter_radius;
#define FILTER_LENGTH (2 * filter_radius + 1)
#define PADDING filter_radius
#define ABS(val) ((val)<0.0 ? (-(val)) : (val))
#define accuracy 0.00005
typedef struct{
double d;
int padding;
}filterStruct;
__constant__ filterStruct filter_constant[1024];
////////////////////////////////////////////////////////////////////////////////
// Reference row convolution filter
////////////////////////////////////////////////////////////////////////////////
void convolutionRowCPU(double *h_Dst, double *h_Src, double *h_Filter,
int imageW, int imageH, int filterR) {
int x, y, k;
for (y = PADDING; (unsigned int)y < (unsigned int)imageH - PADDING; y++) {
for (x = PADDING; (unsigned int)x < (unsigned int)imageW - PADDING; x++) {
double sum = 0;
for (k = -filterR; k <= filterR; k++) {
int d = x + k;
if (d >= 0 && d < imageW) {
sum += h_Src[y * imageW + d] * h_Filter[filterR - k];
}
h_Dst[y * imageW + x] = sum;
}
}
}
}
////////////////////////////////////////////////////////////////////////////////
// Reference column convolution filter
////////////////////////////////////////////////////////////////////////////////
void convolutionColumnCPU(double *h_Dst, double *h_Src, double *h_Filter,
int imageW, int imageH, int filterR) {
int x, y, k;
for (y = PADDING; (unsigned int)y < (unsigned int)imageH - PADDING; y++) {
for (x = PADDING; (unsigned int)x < (unsigned int)imageW - PADDING; x++) {
double sum = 0;
for (k = -filterR; k <= filterR; k++) {
int d = y + k;
if (d >= 0 && d < imageH) {
sum += h_Src[d * imageW + x] * h_Filter[filterR - k];
}
h_Dst[y * imageW + x] = sum;
}
}
}
}
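// Separability recap (why two 1-D passes are enough): for a separable kernel H(i,j) = h(i)*h(j),
// the 2-D convolution
//   Dst(y,x) = sum_i sum_j Src(y+i, x+j) * H(i,j)
// factors into a row pass followed by a column pass with the same 1-D filter h, which is what
// convolutionRowCPU / convolutionColumnCPU (and the two GPU kernels below) implement. Per pixel
// this drops the cost from O(FILTER_LENGTH^2) to O(2*FILTER_LENGTH).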
////////////////////////////////////////////////////////////////////////////////
// GPU: Row convolution Kernel
////////////////////////////////////////////////////////////////////////////////
__global__ void kernel_rows(const double *filter, const double *input, double *output,
int imageW, int imageH, int filterR, int filterStep){
int idx_x = threadIdx.x + blockDim.x * blockIdx.x;
int idx_y = threadIdx.y + blockDim.y * blockIdx.y;
int padding = filterR;
double sum = 0;
int k;
extern __shared__ double input_local[];
for(k = - filterR; k <= filterR; k+=filterStep)
input_local[threadIdx.y * (2 * padding + blockDim.x) + threadIdx.x + padding + k] = input[(idx_y + padding) * imageW + idx_x + padding + k];
// input_local[threadIdx.y * (2 * padding + blockDim.x) + threadIdx.x ] = input[(idx_y + padding) * imageW + idx_x];
// input_local[threadIdx.y * (2 * padding + blockDim.x) + threadIdx.x + padding ] = input[(idx_y + padding) * imageW + idx_x + padding];
// input_local[threadIdx.y * (2 * padding + blockDim.x) + threadIdx.x + padding + 1] = input[(idx_y + padding) * imageW + idx_x + padding + 1];
// input_local[threadIdx.y * (2 * padding + blockDim.x) + threadIdx.x + padding * 2] = input[(idx_y + padding) * imageW + idx_x + padding * 2];
__syncthreads();
// Rows
for(k = -filterR; k <= filterR; k++){
// int d = (idx_x + padding) + k;
sum += input_local[threadIdx.y * (2 * padding + blockDim.x) + padding + threadIdx.x + k] * filter_constant[filterR - k].d;
// sum += input[(idx_y + padding) * imageW + d] * filter[filterR - k];
}
output[(idx_y + padding) * imageW + idx_x + padding] = sum;
}
////////////////////////////////////////////////////////////////////////////////
// GPU: Column convolution Kernel
////////////////////////////////////////////////////////////////////////////////
__global__ void kernel_columns(const double *filter, const double *buffer, double *output,
int imageW, int imageH, int filterR, int filterStep){
int idx_x = threadIdx.x + blockDim.x * blockIdx.x;
int idx_y = threadIdx.y + blockDim.y * blockIdx.y;
int padding = filterR;
double sum = 0;
int k;
extern __shared__ double buffer_local[];
for(k = - filterR; k <= filterR; k+=filterStep)
buffer_local[(threadIdx.y + padding + k) * blockDim.x + threadIdx.x] = buffer[(idx_y + padding + k) * imageW + idx_x + padding];
// buffer_local[(threadIdx.y ) * blockDim.x + threadIdx.x] = buffer[(idx_y ) * imageW + idx_x + padding];
// buffer_local[(threadIdx.y + padding ) * blockDim.x + threadIdx.x] = buffer[(idx_y + padding ) * imageW + idx_x + padding];
// buffer_local[(threadIdx.y + padding + 1) * blockDim.x + threadIdx.x] = buffer[(idx_y + padding + 1) * imageW + idx_x + padding];
// buffer_local[(threadIdx.y + padding * 2) * blockDim.x + threadIdx.x] = buffer[(idx_y + padding * 2) * imageW + idx_x + padding];
__syncthreads();
// Columns
for(k = -filterR; k <= filterR; k++){
// int d = (idx_y + padding) + k;
sum += buffer_local[(threadIdx.y + padding + k) * blockDim.x + threadIdx.x] * filter_constant[filterR - k].d;
// sum += buffer_local[((idx_y + padding) + k) * imageW + (idx_x + padding)] * filter[filterR - k];
}
output[(idx_y + padding) * imageW + idx_x + padding] = sum;
}
// Auxiliary function for CUDA error checking
void cudaCheckForErrors(){
hipError_t error = hipGetLastError();
if(error != hipSuccess){
// something's gone wrong
// print out the CUDA error as a string
printf("CUDA Error: %s\n", hipGetErrorString(error));
exit(1);
}
}
////////////////////////////////////////////////////////////////////////////////
// Main program
////////////////////////////////////////////////////////////////////////////////
int main(void) {
double
*h_Filter,
*h_Input,
*h_Buffer,
*h_OutputCPU;
// GPU
double *d_Filter, *d_Input, *d_Buffer, *d_OutputGPU, *h_OutputGPU;
unsigned int imageW;
unsigned int imageH;
unsigned int i;
unsigned int j;
unsigned int padding_imageW;
unsigned int padding_imageH;
GpuTimer timer;
clock_t start_CPU, end_CPU;
printf("Enter filter radius : ");
scanf("%d", &filter_radius);
// The user provides imageW and imageH and we assume they are equal,
// i.e. imageW = imageH = N, where N is given by the user.
// For simplicity we assume square images.
printf("Enter image size. Should be a power of two and greater than %d : ", FILTER_LENGTH);
scanf("%d", &imageW);
imageH = imageW;
padding_imageH = PADDING * 2 + imageH;
padding_imageW = padding_imageH;
printf("Image Width x Height = %i x %i\n\n", imageW, imageH);
printf("Allocating and initializing host arrays...\n");
// It would be a good idea to also check the result of each malloc...
h_Filter = (double *)malloc(FILTER_LENGTH * sizeof(double));
h_Input = (double *)malloc(padding_imageW * padding_imageH * sizeof(double));
h_Buffer = (double *)malloc(padding_imageW * padding_imageH * sizeof(double));
h_OutputCPU = (double *)malloc(padding_imageW * padding_imageH * sizeof(double));
h_OutputGPU = (double *)malloc(padding_imageW * padding_imageH * sizeof(double));
hipMalloc( (void **) &d_Filter, FILTER_LENGTH * sizeof(double));
hipMalloc( (void **) &d_Input, padding_imageW * padding_imageH * sizeof(double));
hipMalloc( (void **) &d_Buffer,padding_imageW * padding_imageH * sizeof(double));
hipMalloc( (void **) &d_OutputGPU, padding_imageW * padding_imageH * sizeof(double));
if(!h_Filter || !h_Input || !h_Buffer || !h_OutputCPU || !h_OutputGPU){
printf("error allocating memory for the host\n");
exit(1);
}
if(!d_Filter || !d_Input || !d_Buffer || !d_OutputGPU){
printf("Error allocating memory for the device\n");
exit(1);
}
hipMemset(d_Buffer, 0, padding_imageW * padding_imageW * sizeof(double));
hipMemset(d_OutputGPU, 0, padding_imageW * padding_imageW * sizeof(double));
srand(200);
for (i = 0; i < FILTER_LENGTH; i++) {
h_Filter[i] = (double)(rand() % 16);
}
for (i = 0; i < padding_imageH; i++) {
for (j = 0; j < padding_imageW; j++) {
if(i < PADDING || i >= imageW + PADDING || j < PADDING || j >= imageW + PADDING)
h_Input[i * padding_imageW + j] = 0;
else
h_Input[i*padding_imageW + j] = (double)rand() / ((double)RAND_MAX / 255) + (double)rand() / (double)RAND_MAX;
}
}
//////////////////////////////// CPU ///////////////////////////////////////
// The part below runs on the CPU; the GPU results must be compared against it.
printf("CPU computation...\n");
start_CPU = clock();
convolutionRowCPU(h_Buffer, h_Input, h_Filter, padding_imageW, padding_imageH, filter_radius); // row-wise convolution
convolutionColumnCPU(h_OutputCPU, h_Buffer, h_Filter, padding_imageW, padding_imageH, filter_radius); // column-wise convolution
end_CPU = clock();
printf("CPU Time: %lf ms\n", ((double) ((end_CPU - start_CPU) * 1000)) / CLOCKS_PER_SEC);
//////////////////////////////// GPU ///////////////////////////////////////
dim3 block_dim;
dim3 grid_dim;
if(padding_imageW < 32){
block_dim.x = imageH;
block_dim.y = imageH;
grid_dim.x = 1;
grid_dim.y = 1;
} else{
block_dim.x = 32;
block_dim.y = 32;
grid_dim.x = imageW / block_dim.x;
grid_dim.y = imageH / block_dim.y;
}
int filterStep;
if(filter_radius < 16)
filterStep = filter_radius;
else if(filter_radius % 32 == 0)
filterStep = 32;
else if(filter_radius % 16)
filterStep = 2;
else
filterStep = 16;
// used to check for padding
filterStruct *h_Filter_struct;
h_Filter_struct = (filterStruct *) malloc(FILTER_LENGTH * sizeof(filterStruct));
if(!h_Filter_struct){
printf("Error allocating memory for the padding filter struct.\n");
exit(1);
}
for(i = 0; i < FILTER_LENGTH; i++){
h_Filter_struct[i].d = h_Filter[i];
}
hipMemcpyToSymbol(filter_constant, h_Filter_struct, FILTER_LENGTH * sizeof(filterStruct));
printf("GPU computation...\n");
timer.Start();
//hipMemcpy(d_Buffer, h_Input, padding_imageW * padding_imageW * sizeof(double), hipMemcpyHostToDevice);
// hipMemcpy(d_Filter, h_Filter, FILTER_LENGTH * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_Input, h_Input, padding_imageW * padding_imageW * sizeof(double), hipMemcpyHostToDevice);
// kernel_rows<<<grid_dim, block_dim>>>(d_Filter, d_Input, d_Buffer, padding_imageW, padding_imageH, filter_radius);
hipLaunchKernelGGL(( kernel_rows), dim3(grid_dim), dim3(block_dim), block_dim.y * (2 * filter_radius + block_dim.x) * sizeof(double), 0, d_Filter, d_Input, d_Buffer, padding_imageW, padding_imageH, filter_radius, filterStep);
hipDeviceSynchronize();
cudaCheckForErrors();
// kernel_columns<<<grid_dim, block_dim>>>(d_Filter, d_Buffer, d_OutputGPU, padding_imageW, padding_imageH, filter_radius);
hipLaunchKernelGGL(( kernel_columns), dim3(grid_dim), dim3(block_dim), block_dim.x * (2 * filter_radius + block_dim.y) * sizeof(double), 0, d_Filter, d_Buffer, d_OutputGPU, padding_imageW, padding_imageH, filter_radius, filterStep);
hipDeviceSynchronize();
cudaCheckForErrors();
hipMemcpy(h_OutputGPU, d_OutputGPU, padding_imageW * padding_imageH * sizeof(double), hipMemcpyDeviceToHost);
timer.Stop();
printf("GPU Time elapsed = %g ms\n", timer.Elapsed());
//////////////////////// RESULT COMPARISON /////////////////////////////////
// Compare the GPU and CPU results; if even one value exceeds the accuracy we have defined,
// then there is an error and we may terminate the program.
for(i = 0; i < imageH * imageW; i++){
if(ABS(h_OutputGPU[i] - h_OutputCPU[i]) >= accuracy){
printf("GPU computations are not as accurate as we want.\n");
break;
}
}
////////////////// CPU: free all the allocated memory //////////////////////
free(h_OutputCPU);
free(h_Buffer);
free(h_Input);
free(h_Filter);
////////////////// GPU: free all the allocated memory //////////////////////
free(h_OutputGPU);
hipFree(d_Filter);
hipFree(d_Input);
hipFree(d_Buffer);
hipFree(d_OutputGPU);
// Do a device reset just in case... Remove this comment once you have implemented the CUDA part
hipDeviceReset();
return 0;
}
| 2c5bfa8afcf831fce9ed192c712e1ee1ed969848.cu | /*
* This sample implements a separable convolution
* of a 2D image with an arbitrary filter.
*/
#include <stdio.h>
#include <stdlib.h>
// #include <cuda.h>
// #include <cuda_runtime_api.h>
#include <time.h>
#include "gputimer.h"
unsigned int filter_radius;
#define FILTER_LENGTH (2 * filter_radius + 1)
#define PADDING filter_radius
#define ABS(val) ((val)<0.0 ? (-(val)) : (val))
#define accuracy 0.00005
typedef struct{
double d;
int padding;
}filterStruct;
__constant__ filterStruct filter_constant[1024];
////////////////////////////////////////////////////////////////////////////////
// Reference row convolution filter
////////////////////////////////////////////////////////////////////////////////
void convolutionRowCPU(double *h_Dst, double *h_Src, double *h_Filter,
int imageW, int imageH, int filterR) {
int x, y, k;
for (y = PADDING; (unsigned int)y < (unsigned int)imageH - PADDING; y++) {
for (x = PADDING; (unsigned int)x < (unsigned int)imageW - PADDING; x++) {
double sum = 0;
for (k = -filterR; k <= filterR; k++) {
int d = x + k;
if (d >= 0 && d < imageW) {
sum += h_Src[y * imageW + d] * h_Filter[filterR - k];
}
h_Dst[y * imageW + x] = sum;
}
}
}
}
////////////////////////////////////////////////////////////////////////////////
// Reference column convolution filter
////////////////////////////////////////////////////////////////////////////////
void convolutionColumnCPU(double *h_Dst, double *h_Src, double *h_Filter,
int imageW, int imageH, int filterR) {
int x, y, k;
for (y = PADDING; (unsigned int)y < (unsigned int)imageH - PADDING; y++) {
for (x = PADDING; (unsigned int)x < (unsigned int)imageW - PADDING; x++) {
double sum = 0;
for (k = -filterR; k <= filterR; k++) {
int d = y + k;
if (d >= 0 && d < imageH) {
sum += h_Src[d * imageW + x] * h_Filter[filterR - k];
}
h_Dst[y * imageW + x] = sum;
}
}
}
}
////////////////////////////////////////////////////////////////////////////////
// GPU: Row convolution Kernel
////////////////////////////////////////////////////////////////////////////////
__global__ void kernel_rows(const double *filter, const double *input, double *output,
int imageW, int imageH, int filterR, int filterStep){
int idx_x = threadIdx.x + blockDim.x * blockIdx.x;
int idx_y = threadIdx.y + blockDim.y * blockIdx.y;
int padding = filterR;
double sum = 0;
int k;
extern __shared__ double input_local[];
for(k = - filterR; k <= filterR; k+=filterStep)
input_local[threadIdx.y * (2 * padding + blockDim.x) + threadIdx.x + padding + k] = input[(idx_y + padding) * imageW + idx_x + padding + k];
// input_local[threadIdx.y * (2 * padding + blockDim.x) + threadIdx.x ] = input[(idx_y + padding) * imageW + idx_x];
// input_local[threadIdx.y * (2 * padding + blockDim.x) + threadIdx.x + padding ] = input[(idx_y + padding) * imageW + idx_x + padding];
// input_local[threadIdx.y * (2 * padding + blockDim.x) + threadIdx.x + padding + 1] = input[(idx_y + padding) * imageW + idx_x + padding + 1];
// input_local[threadIdx.y * (2 * padding + blockDim.x) + threadIdx.x + padding * 2] = input[(idx_y + padding) * imageW + idx_x + padding * 2];
__syncthreads();
// Rows
for(k = -filterR; k <= filterR; k++){
// int d = (idx_x + padding) + k;
sum += input_local[threadIdx.y * (2 * padding + blockDim.x) + padding + threadIdx.x + k] * filter_constant[filterR - k].d;
// sum += input[(idx_y + padding) * imageW + d] * filter[filterR - k];
}
output[(idx_y + padding) * imageW + idx_x + padding] = sum;
}
////////////////////////////////////////////////////////////////////////////////
// GPU: Column convolution Kernel
////////////////////////////////////////////////////////////////////////////////
__global__ void kernel_columns(const double *filter, const double *buffer, double *output,
int imageW, int imageH, int filterR, int filterStep){
int idx_x = threadIdx.x + blockDim.x * blockIdx.x;
int idx_y = threadIdx.y + blockDim.y * blockIdx.y;
int padding = filterR;
double sum = 0;
int k;
extern __shared__ double buffer_local[];
for(k = - filterR; k <= filterR; k+=filterStep)
buffer_local[(threadIdx.y + padding + k) * blockDim.x + threadIdx.x] = buffer[(idx_y + padding + k) * imageW + idx_x + padding];
// buffer_local[(threadIdx.y ) * blockDim.x + threadIdx.x] = buffer[(idx_y ) * imageW + idx_x + padding];
// buffer_local[(threadIdx.y + padding ) * blockDim.x + threadIdx.x] = buffer[(idx_y + padding ) * imageW + idx_x + padding];
// buffer_local[(threadIdx.y + padding + 1) * blockDim.x + threadIdx.x] = buffer[(idx_y + padding + 1) * imageW + idx_x + padding];
// buffer_local[(threadIdx.y + padding * 2) * blockDim.x + threadIdx.x] = buffer[(idx_y + padding * 2) * imageW + idx_x + padding];
__syncthreads();
// Columns
for(k = -filterR; k <= filterR; k++){
// int d = (idx_y + padding) + k;
sum += buffer_local[(threadIdx.y + padding + k) * blockDim.x + threadIdx.x] * filter_constant[filterR - k].d;
// sum += buffer_local[((idx_y + padding) + k) * imageW + (idx_x + padding)] * filter[filterR - k];
}
output[(idx_y + padding) * imageW + idx_x + padding] = sum;
}
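// Shared-memory note (a reading of the two kernels above): each block stages its tile of
// blockDim.x x blockDim.y pixels plus a halo of filter_radius pixels on both sides of the
// convolved axis, which is why the launches in main() request
//   blockDim.y * (2*filter_radius + blockDim.x) * sizeof(double)
// bytes of dynamic shared memory for the row pass and the transposed expression for the column pass.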
// Auxiliary function for CUDA error checking
void cudaCheckForErrors(){
cudaError_t error = cudaGetLastError();
if(error != cudaSuccess){
// something's gone wrong
// print out the CUDA error as a string
printf("CUDA Error: %s\n", cudaGetErrorString(error));
exit(1);
}
}
////////////////////////////////////////////////////////////////////////////////
// Main program
////////////////////////////////////////////////////////////////////////////////
int main(void) {
double
*h_Filter,
*h_Input,
*h_Buffer,
*h_OutputCPU;
// GPU
double *d_Filter, *d_Input, *d_Buffer, *d_OutputGPU, *h_OutputGPU;
unsigned int imageW;
unsigned int imageH;
unsigned int i;
unsigned int j;
unsigned int padding_imageW;
unsigned int padding_imageH;
GpuTimer timer;
clock_t start_CPU, end_CPU;
printf("Enter filter radius : ");
scanf("%d", &filter_radius);
// The user provides imageW and imageH and we assume they are equal,
// i.e. imageW = imageH = N, where N is given by the user.
// For simplicity we assume square images.
printf("Enter image size. Should be a power of two and greater than %d : ", FILTER_LENGTH);
scanf("%d", &imageW);
imageH = imageW;
padding_imageH = PADDING * 2 + imageH;
padding_imageW = padding_imageH;
printf("Image Width x Height = %i x %i\n\n", imageW, imageH);
printf("Allocating and initializing host arrays...\n");
// It would be a good idea to also check the result of each malloc...
h_Filter = (double *)malloc(FILTER_LENGTH * sizeof(double));
h_Input = (double *)malloc(padding_imageW * padding_imageH * sizeof(double));
h_Buffer = (double *)malloc(padding_imageW * padding_imageH * sizeof(double));
h_OutputCPU = (double *)malloc(padding_imageW * padding_imageH * sizeof(double));
h_OutputGPU = (double *)malloc(padding_imageW * padding_imageH * sizeof(double));
cudaMalloc( (void **) &d_Filter, FILTER_LENGTH * sizeof(double));
cudaMalloc( (void **) &d_Input, padding_imageW * padding_imageH * sizeof(double));
cudaMalloc( (void **) &d_Buffer,padding_imageW * padding_imageH * sizeof(double));
cudaMalloc( (void **) &d_OutputGPU, padding_imageW * padding_imageH * sizeof(double));
if(!h_Filter || !h_Input || !h_Buffer || !h_OutputCPU || !h_OutputGPU){
printf("error allocating memory for the host\n");
exit(1);
}
if(!d_Filter || !d_Input || !d_Buffer || !d_OutputGPU){
printf("Error allocating memory for the device\n");
exit(1);
}
cudaMemset(d_Buffer, 0, padding_imageW * padding_imageW * sizeof(double));
cudaMemset(d_OutputGPU, 0, padding_imageW * padding_imageW * sizeof(double));
srand(200);
for (i = 0; i < FILTER_LENGTH; i++) {
h_Filter[i] = (double)(rand() % 16);
}
for (i = 0; i < padding_imageH; i++) {
for (j = 0; j < padding_imageW; j++) {
if(i < PADDING || i >= imageW + PADDING || j < PADDING || j >= imageW + PADDING)
h_Input[i * padding_imageW + j] = 0;
else
h_Input[i*padding_imageW + j] = (double)rand() / ((double)RAND_MAX / 255) + (double)rand() / (double)RAND_MAX;
}
}
//////////////////////////////// CPU ///////////////////////////////////////
// The part below runs on the CPU; the GPU results must be compared against it.
printf("CPU computation...\n");
start_CPU = clock();
convolutionRowCPU(h_Buffer, h_Input, h_Filter, padding_imageW, padding_imageH, filter_radius); // row-wise convolution
convolutionColumnCPU(h_OutputCPU, h_Buffer, h_Filter, padding_imageW, padding_imageH, filter_radius); // column-wise convolution
end_CPU = clock();
printf("CPU Time: %lf ms\n", ((double) ((end_CPU - start_CPU) * 1000)) / CLOCKS_PER_SEC);
//////////////////////////////// GPU ///////////////////////////////////////
dim3 block_dim;
dim3 grid_dim;
if(padding_imageW < 32){
block_dim.x = imageH;
block_dim.y = imageH;
grid_dim.x = 1;
grid_dim.y = 1;
} else{
block_dim.x = 32;
block_dim.y = 32;
grid_dim.x = imageW / block_dim.x;
grid_dim.y = imageH / block_dim.y;
}
int filterStep;
if(filter_radius < 16)
filterStep = filter_radius;
else if(filter_radius % 32 == 0)
filterStep = 32;
else if(filter_radius % 16)
filterStep = 2;
else
filterStep = 16;
// used to check for padding
filterStruct *h_Filter_struct;
h_Filter_struct = (filterStruct *) malloc(FILTER_LENGTH * sizeof(filterStruct));
if(!h_Filter_struct){
printf("Error allocating memory for the padding filter struct.\n");
exit(1);
}
for(i = 0; i < FILTER_LENGTH; i++){
h_Filter_struct[i].d = h_Filter[i];
}
cudaMemcpyToSymbol(filter_constant, h_Filter_struct, FILTER_LENGTH * sizeof(filterStruct));
printf("GPU computation...\n");
timer.Start();
//cudaMemcpy(d_Buffer, h_Input, padding_imageW * padding_imageW * sizeof(double), cudaMemcpyHostToDevice);
// cudaMemcpy(d_Filter, h_Filter, FILTER_LENGTH * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_Input, h_Input, padding_imageW * padding_imageW * sizeof(double), cudaMemcpyHostToDevice);
// kernel_rows<<<grid_dim, block_dim>>>(d_Filter, d_Input, d_Buffer, padding_imageW, padding_imageH, filter_radius);
kernel_rows<<<grid_dim, block_dim, block_dim.y * (2 * filter_radius + block_dim.x) * sizeof(double)>>>(d_Filter, d_Input, d_Buffer, padding_imageW, padding_imageH, filter_radius, filterStep);
cudaDeviceSynchronize();
cudaCheckForErrors();
// kernel_columns<<<grid_dim, block_dim>>>(d_Filter, d_Buffer, d_OutputGPU, padding_imageW, padding_imageH, filter_radius);
kernel_columns<<<grid_dim, block_dim, block_dim.x * (2 * filter_radius + block_dim.y) * sizeof(double)>>>(d_Filter, d_Buffer, d_OutputGPU, padding_imageW, padding_imageH, filter_radius, filterStep);
cudaDeviceSynchronize();
cudaCheckForErrors();
cudaMemcpy(h_OutputGPU, d_OutputGPU, padding_imageW * padding_imageH * sizeof(double), cudaMemcpyDeviceToHost);
timer.Stop();
printf("GPU Time elapsed = %g ms\n", timer.Elapsed());
//////////////////////// RESULT COMPARISON /////////////////////////////////
// Compare the GPU and CPU results; if even one value exceeds the accuracy we have defined,
// then there is an error and we may terminate the program.
for(i = 0; i < imageH * imageW; i++){
if(ABS(h_OutputGPU[i] - h_OutputCPU[i]) >= accuracy){
printf("GPU computations are not as accurate as we want.\n");
break;
}
}
////////////////// CPU: free all the allocated memory //////////////////////
free(h_OutputCPU);
free(h_Buffer);
free(h_Input);
free(h_Filter);
////////////////// GPU: free all the allocated memory //////////////////////
free(h_OutputGPU);
cudaFree(d_Filter);
cudaFree(d_Input);
cudaFree(d_Buffer);
cudaFree(d_OutputGPU);
// Do a device reset just in case... Remove this comment once you have implemented the CUDA part
cudaDeviceReset();
return 0;
}
|
081e36e64821c428fc47613d7fdf482c62509b6a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <opencv2/highgui/highgui.hpp>
#include "gcube.h"
#include "gpu_util.h"
gcube::gcube(void) {
this->d_pixels = NULL;
this->create(0, 0, 0, gfill::none);
}
gcube::gcube(size_t n_rows, size_t n_cols, size_t n_slices, uint8_t fill_type) {
this->d_pixels = NULL;
this->create(n_rows, n_cols, n_slices, fill_type);
}
gcube::gcube(const gcube &gpucube) {
this->d_pixels = NULL;
this->create(gpucube.n_rows, gpucube.n_cols, gpucube.n_slices, gfill::none);
checkCudaErrors(hipMemcpy(this->d_pixels, gpucube.d_pixels, this->n_elem * sizeof(float), hipMemcpyDeviceToDevice));
}
gcube::gcube(const std::string &fname) {
this->d_pixels = NULL;
this->load(fname);
}
gcube::~gcube(void) {
if (this->d_pixels) {
checkCudaErrors(hipFree(this->d_pixels));
}
}
__global__ void GPU_map_assign(float *F, float val, size_t n_elems) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= n_elems) {
return;
}
F[idx] = val;
}
void gcube::create(size_t n_rows, size_t n_cols, size_t n_slices, uint8_t fill_type) {
if (this->d_pixels) {
checkCudaErrors(hipFree(d_pixels));
}
this->n_rows = n_rows;
this->n_cols = n_cols;
this->n_slices = n_slices;
this->n_elem = n_rows * n_cols * n_slices;
if (this->n_elem == 0) {
this->d_pixels = NULL;
} else {
checkCudaErrors(hipMalloc(&this->d_pixels, this->n_elem * sizeof(float)));
switch (fill_type) {
case gfill::none:
break;
case gfill::zeros:
checkCudaErrors(hipMemset(this->d_pixels, 0, this->n_elem * sizeof(float))); // memset the device buffer itself, not the address of the pointer member
break;
case gfill::ones:
hipLaunchKernelGGL(( GPU_map_assign), dim3((this->n_elem-1) / 128 + 1), dim3(128), 0, 0, this->d_pixels, 1, this->n_elem);
checkCudaErrors(hipGetLastError());
break;
default:
break;
}
}
}
gcube &gcube::operator=(const gcube &gpucube) {
this->create(gpucube.n_rows, gpucube.n_cols, gpucube.n_slices, gfill::none);
checkCudaErrors(hipMemcpy(this->d_pixels, gpucube.d_pixels, this->n_elem * sizeof(float), hipMemcpyDeviceToDevice));
return *this;
}
void gcube::load(const std::string &fname) {
this->create(cv::imread(fname));
}
void gcube::save(const std::string &fname) {
cv::imwrite(fname, this->cv_mat());
}
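// Usage sketch (hypothetical file names, assuming a 3-channel 8-bit input image):
//   gcube img("input.png");         // decode with OpenCV and upload to the GPU as floats in [0,1]
//   img.save("copy.png");           // download via cv_mat() and re-encode with OpenCV
//   cv::Mat preview = img.cv_mat(); // 8-bit BGR copy on the host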
// Specific OpenCV interaction (to make sure that they are backwards compatible)
gcube::gcube(cv::Mat &cvMat) {
this->d_pixels = NULL;
this->create(cvMat);
}
void gcube::create(const cv::Mat &cvMat) {
this->create(cvMat.rows, cvMat.cols, cvMat.channels(), gfill::none);
float *h_pixels = new float[this->n_elem];
for (int i = 0; i < this->n_rows; i++) {
for (int j = 0; j < this->n_cols; j++) {
cv::Vec3b color = cvMat.at<cv::Vec3b>(i, j);
for (int k = 0; k < this->n_slices; k++) {
h_pixels[IJK2C(i, j, k, this->n_rows, this->n_cols)] = (float)color[k] / 255.0f;
}
}
}
checkCudaErrors(hipMemcpy(this->d_pixels, h_pixels, this->n_elem * sizeof(float), hipMemcpyHostToDevice));
delete[] h_pixels; // allocated with new[], so release with delete[] rather than free()
}
cv::Mat gcube::cv_mat(void) {
cv::Mat cv_image(this->n_rows, this->n_cols, CV_8UC3);
float *h_pixels = new float[this->n_elem];
checkCudaErrors(hipMemcpy(h_pixels, this->d_pixels, this->n_elem * sizeof(float), hipMemcpyDeviceToHost));
for (int i = 0; i < this->n_rows; i++) {
for (int j = 0; j < this->n_cols; j++) {
if (this->n_slices == 1) {
cv_image.at<cv::Vec3b>(i, j) =
cv::Vec3b((int)(h_pixels[IJ2C(i, j, this->n_rows)] * 255.0f),
(int)(h_pixels[IJ2C(i, j, this->n_rows)] * 255.0f),
(int)(h_pixels[IJ2C(i, j, this->n_rows)] * 255.0f));
} else if (this->n_slices == 3) {
cv_image.at<cv::Vec3b>(i, j) =
cv::Vec3b((int)(h_pixels[IJK2C(i, j, 0, this->n_rows, this->n_cols)] * 255.0f),
(int)(h_pixels[IJK2C(i, j, 1, this->n_rows, this->n_cols)] * 255.0f),
(int)(h_pixels[IJK2C(i, j, 2, this->n_rows, this->n_cols)] * 255.0f));
}
}
}
delete[] h_pixels;
return cv_image;
}
gcube &gcube::operator=(const cv::Mat &cvMat) {
this->create(cvMat);
return *this;
}
| 081e36e64821c428fc47613d7fdf482c62509b6a.cu | #include <opencv2/highgui/highgui.hpp>
#include "gcube.h"
#include "gpu_util.h"
gcube::gcube(void) {
this->d_pixels = NULL;
this->create(0, 0, 0, gfill::none);
}
gcube::gcube(size_t n_rows, size_t n_cols, size_t n_slices, uint8_t fill_type) {
this->d_pixels = NULL;
this->create(n_rows, n_cols, n_slices, fill_type);
}
gcube::gcube(const gcube &gpucube) {
this->d_pixels = NULL;
this->create(gpucube.n_rows, gpucube.n_cols, gpucube.n_slices, gfill::none);
checkCudaErrors(cudaMemcpy(this->d_pixels, gpucube.d_pixels, this->n_elem * sizeof(float), cudaMemcpyDeviceToDevice));
}
gcube::gcube(const std::string &fname) {
this->d_pixels = NULL;
this->load(fname);
}
gcube::~gcube(void) {
if (this->d_pixels) {
checkCudaErrors(cudaFree(this->d_pixels));
}
}
__global__ void GPU_map_assign(float *F, float val, size_t n_elems) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= n_elems) {
return;
}
F[idx] = val;
}
void gcube::create(size_t n_rows, size_t n_cols, size_t n_slices, uint8_t fill_type) {
if (this->d_pixels) {
checkCudaErrors(cudaFree(d_pixels));
}
this->n_rows = n_rows;
this->n_cols = n_cols;
this->n_slices = n_slices;
this->n_elem = n_rows * n_cols * n_slices;
if (this->n_elem == 0) {
this->d_pixels = NULL;
} else {
checkCudaErrors(cudaMalloc(&this->d_pixels, this->n_elem * sizeof(float)));
switch (fill_type) {
case gfill::none:
break;
case gfill::zeros:
checkCudaErrors(cudaMemset(this->d_pixels, 0, this->n_elem * sizeof(float))); // memset the device buffer itself, not the address of the pointer member
break;
case gfill::ones:
GPU_map_assign<<<(this->n_elem-1) / 128 + 1, 128>>>(this->d_pixels, 1, this->n_elem);
checkCudaErrors(cudaGetLastError());
break;
default:
break;
}
}
}
gcube &gcube::operator=(const gcube &gpucube) {
this->create(gpucube.n_rows, gpucube.n_cols, gpucube.n_slices, gfill::none);
checkCudaErrors(cudaMemcpy(this->d_pixels, gpucube.d_pixels, this->n_elem * sizeof(float), cudaMemcpyDeviceToDevice));
return *this;
}
void gcube::load(const std::string &fname) {
this->create(cv::imread(fname));
}
void gcube::save(const std::string &fname) {
cv::imwrite(fname, this->cv_mat());
}
// Specific OpenCV interaction (to make sure that they are backwards compatible)
gcube::gcube(cv::Mat &cvMat) {
this->d_pixels = NULL;
this->create(cvMat);
}
void gcube::create(const cv::Mat &cvMat) {
this->create(cvMat.rows, cvMat.cols, cvMat.channels(), gfill::none);
float *h_pixels = new float[this->n_elem];
for (int i = 0; i < this->n_rows; i++) {
for (int j = 0; j < this->n_cols; j++) {
cv::Vec3b color = cvMat.at<cv::Vec3b>(i, j);
for (int k = 0; k < this->n_slices; k++) {
h_pixels[IJK2C(i, j, k, this->n_rows, this->n_cols)] = (float)color[k] / 255.0f;
}
}
}
checkCudaErrors(cudaMemcpy(this->d_pixels, h_pixels, this->n_elem * sizeof(float), cudaMemcpyHostToDevice));
delete[] h_pixels;
}
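// Copy the image back from the device and pack it into an 8-bit, 3-channel cv::Mat;
// single-slice (grayscale) data is replicated across all three output channels.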
cv::Mat gcube::cv_mat(void) {
cv::Mat cv_image(this->n_rows, this->n_cols, CV_8UC3);
float *h_pixels = new float[this->n_elem];
checkCudaErrors(cudaMemcpy(h_pixels, this->d_pixels, this->n_elem * sizeof(float), cudaMemcpyDeviceToHost));
for (int i = 0; i < this->n_rows; i++) {
for (int j = 0; j < this->n_cols; j++) {
if (this->n_slices == 1) {
cv_image.at<cv::Vec3b>(i, j) =
cv::Vec3b((int)(h_pixels[IJ2C(i, j, this->n_rows)] * 255.0f),
(int)(h_pixels[IJ2C(i, j, this->n_rows)] * 255.0f),
(int)(h_pixels[IJ2C(i, j, this->n_rows)] * 255.0f));
} else if (this->n_slices == 3) {
cv_image.at<cv::Vec3b>(i, j) =
cv::Vec3b((int)(h_pixels[IJK2C(i, j, 0, this->n_rows, this->n_cols)] * 255.0f),
(int)(h_pixels[IJK2C(i, j, 1, this->n_rows, this->n_cols)] * 255.0f),
(int)(h_pixels[IJK2C(i, j, 2, this->n_rows, this->n_cols)] * 255.0f));
}
}
}
delete[] h_pixels;
return cv_image;
}
gcube &gcube::operator=(const cv::Mat &cvMat) {
this->create(cvMat);
return *this;
}
|
41b612b4da705df89b745330d5dec01402244529.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <future>
#include <thread>
#include <chrono>
#include <iostream>
#define N 1000000
#define NUM_THREADS_PER_BLOCK 256
#define NUM_BLOCKS_PER_GRID 1024
//#define NUM_BLOCKS_PER_GRID (N + NUM_THREADS_PER_BLOCK-1) / NUM_THREADS_PER_BLOCK;
__constant__ int factor = 0;
__global__
void vectorAdd(int *a, int *b, int *c) {
int i = blockIdx.x*blockDim.x + threadIdx.x;
c[i] = factor*(a[i] + b[i]);
}
__global__
void matrixAdd(int **a,int **b, int**c) {
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
c[i][j] = a[i][j] + b[i][j];
}
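// Dot product kernel: each thread accumulates a grid-strided partial sum of a[i]*b[i],
// the block then reduces those partials in shared memory, and thread 0 of every block
// writes one partial result into c (the final sum over blocks is done on the host).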
__global__
void dotProduct(float *a, float *b, float *c) {
// shared memory!
__shared__ float cache[NUM_THREADS_PER_BLOCK];
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int cacheIndex = threadIdx.x;
float runningSum = 0;
while (tid < N) {
runningSum += a[tid] * b[tid];
tid += blockDim.x * gridDim.x;
}
// store the current running sum for the threads
cache[cacheIndex] = runningSum;
// sync all the threads before starting to cooperate
__syncthreads();
// reduction
int i = blockDim.x/2; // start with half the threads in the block
while (i != 0) {
if (cacheIndex < i)
cache[cacheIndex] += cache[cacheIndex + i];
__syncthreads();
i /= 2;
}
if (cacheIndex == 0)
c[blockIdx.x] = cache[0];
}
#define PRINT(x) \
std::cout << #x " = " << x << std::endl
void func(const char* ptr) {
std::cout << "ptr = " << ptr << std::endl;
}
int main(int argc, char** argv) {
// start time
auto startTime = std::chrono::high_resolution_clock::now();
printf("Hello World\n");
// get the number of devices
int numDevices;
hipGetDeviceCount(&numDevices);
PRINT(numDevices);
hipDeviceProp_t prop;
for (auto i=0 ; i<numDevices; i++) {
hipGetDeviceProperties(&prop, i);
PRINT(prop.name);
PRINT(prop.totalGlobalMem);
PRINT(prop.sharedMemPerBlock);
PRINT(prop.regsPerBlock);
PRINT(prop.warpSize);
PRINT(prop.memPitch);
PRINT(prop.maxThreadsPerBlock);
PRINT(prop.maxThreadsDim[0]);
PRINT(prop.maxThreadsDim[1]);
PRINT(prop.maxThreadsDim[2]);
PRINT(prop.maxGridSize[0]);
PRINT(prop.maxGridSize[1]);
PRINT(prop.maxGridSize[2]);
PRINT(prop.totalConstMem);
PRINT(prop.major);
PRINT(prop.minor);
PRINT(prop.clockRate);
PRINT(prop.textureAlignment);
PRINT(prop.deviceOverlap);
PRINT(prop.multiProcessorCount);
PRINT(prop.kernelExecTimeoutEnabled);
PRINT(prop.integrated);
PRINT(prop.canMapHostMemory);
PRINT(prop.computeMode);
PRINT(prop.maxTexture1D);
PRINT(prop.maxTexture2D[0]);
PRINT(prop.maxTexture2D[1]);
PRINT(prop.maxTexture3D[0]);
PRINT(prop.maxTexture3D[1]);
PRINT(prop.maxTexture3D[2]);
// PRINT(prop.maxTexture2DArray[0]);
// PRINT(prop.maxTexture2DArray[1]);
// PRINT(prop.maxTexture2DArray[2]);
PRINT(prop.concurrentKernels);
}
float h_a[N], h_b[N], h_c[NUM_BLOCKS_PER_GRID];
float *d_a, *d_b, *d_c;
hipMalloc(&d_a, N*sizeof(float));
hipMalloc(&d_b, N*sizeof(float));
hipMalloc(&d_c, NUM_BLOCKS_PER_GRID*sizeof(float));
for (auto i=0; i<N; i++) {
h_a[i] = i;
h_b[i] = i*2;
}
hipMemcpy(d_a, h_a, N*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_b, h_b, N*sizeof(float), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( dotProduct), dim3(NUM_BLOCKS_PER_GRID), dim3(NUM_THREADS_PER_BLOCK), 0, 0, d_a, d_b, d_c);
hipMemcpy(h_c, d_c, NUM_BLOCKS_PER_GRID*sizeof(float), hipMemcpyDeviceToHost);
float sum = 0;
for (auto i=0; i<NUM_BLOCKS_PER_GRID; i++)
sum += h_c[i];
hipFree(d_c);
hipFree(d_a);
hipFree(d_b);
#define sum_squares(x) (x*(x+1)*(2*x+1)/6)
printf("Doues GPU version equal CPU version: %.6g = %.6g\n", sum, 2*sum_squares((float)(N-1)));
// stop time
auto stopTime = std::chrono::high_resolution_clock::now();
PRINT((stopTime - startTime).count());
printf("Goodbye World\n");
}
| 41b612b4da705df89b745330d5dec01402244529.cu | #include <stdio.h>
#include <future>
#include <thread>
#include <chrono>
#include <iostream>
#define N 1000000
#define NUM_THREADS_PER_BLOCK 256
#define NUM_BLOCKS_PER_GRID 1024
//#define NUM_BLOCKS_PER_GRID (N + NUM_THREADS_PER_BLOCK-1) / NUM_THREADS_PER_BLOCK;
__constant__ int factor = 0;
__global__
void vectorAdd(int *a, int *b, int *c) {
int i = blockIdx.x*blockDim.x + threadIdx.x;
c[i] = factor*(a[i] + b[i]);
}
__global__
void matrixAdd(int **a,int **b, int**c) {
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
c[i][j] = a[i][j] + b[i][j];
}
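// Dot product kernel: each thread accumulates a grid-strided partial sum of a[i]*b[i],
// the block then reduces those partials in shared memory, and thread 0 of every block
// writes one partial result into c (the final sum over blocks is done on the host).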
__global__
void dotProduct(float *a, float *b, float *c) {
// shared memory!
__shared__ float cache[NUM_THREADS_PER_BLOCK];
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int cacheIndex = threadIdx.x;
float runningSum = 0;
while (tid < N) {
runningSum += a[tid] * b[tid];
tid += blockDim.x * gridDim.x;
}
// store the current running sum for the threads
cache[cacheIndex] = runningSum;
// sync all the threads before starting to cooperate
__syncthreads();
// reduction
int i = blockDim.x/2; // start with half the threads in the block
while (i != 0) {
if (cacheIndex < i)
cache[cacheIndex] += cache[cacheIndex + i];
__syncthreads();
i /= 2;
}
if (cacheIndex == 0)
c[blockIdx.x] = cache[0];
}
#define PRINT(x) \
std::cout << #x " = " << x << std::endl
void func(const char* ptr) {
std::cout << "ptr = " << ptr << std::endl;
}
int main(int argc, char** argv) {
// start time
auto startTime = std::chrono::high_resolution_clock::now();
printf("Hello World\n");
// get the number of devices
int numDevices;
cudaGetDeviceCount(&numDevices);
PRINT(numDevices);
cudaDeviceProp prop;
for (auto i=0 ; i<numDevices; i++) {
cudaGetDeviceProperties(&prop, i);
PRINT(prop.name);
PRINT(prop.totalGlobalMem);
PRINT(prop.sharedMemPerBlock);
PRINT(prop.regsPerBlock);
PRINT(prop.warpSize);
PRINT(prop.memPitch);
PRINT(prop.maxThreadsPerBlock);
PRINT(prop.maxThreadsDim[0]);
PRINT(prop.maxThreadsDim[1]);
PRINT(prop.maxThreadsDim[2]);
PRINT(prop.maxGridSize[0]);
PRINT(prop.maxGridSize[1]);
PRINT(prop.maxGridSize[2]);
PRINT(prop.totalConstMem);
PRINT(prop.major);
PRINT(prop.minor);
PRINT(prop.clockRate);
PRINT(prop.textureAlignment);
PRINT(prop.deviceOverlap);
PRINT(prop.multiProcessorCount);
PRINT(prop.kernelExecTimeoutEnabled);
PRINT(prop.integrated);
PRINT(prop.canMapHostMemory);
PRINT(prop.computeMode);
PRINT(prop.maxTexture1D);
PRINT(prop.maxTexture2D[0]);
PRINT(prop.maxTexture2D[1]);
PRINT(prop.maxTexture3D[0]);
PRINT(prop.maxTexture3D[1]);
PRINT(prop.maxTexture3D[2]);
// PRINT(prop.maxTexture2DArray[0]);
// PRINT(prop.maxTexture2DArray[1]);
// PRINT(prop.maxTexture2DArray[2]);
PRINT(prop.concurrentKernels);
}
float h_a[N], h_b[N], h_c[NUM_BLOCKS_PER_GRID];
float *d_a, *d_b, *d_c;
cudaMalloc(&d_a, N*sizeof(float));
cudaMalloc(&d_b, N*sizeof(float));
cudaMalloc(&d_c, NUM_BLOCKS_PER_GRID*sizeof(float));
for (auto i=0; i<N; i++) {
h_a[i] = i;
h_b[i] = i*2;
}
cudaMemcpy(d_a, h_a, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, N*sizeof(float), cudaMemcpyHostToDevice);
dotProduct<<<NUM_BLOCKS_PER_GRID, NUM_THREADS_PER_BLOCK>>>(d_a, d_b, d_c);
cudaMemcpy(h_c, d_c, NUM_BLOCKS_PER_GRID*sizeof(float), cudaMemcpyDeviceToHost);
float sum = 0;
for (auto i=0; i<NUM_BLOCKS_PER_GRID; i++)
sum += h_c[i];
cudaFree(d_c);
cudaFree(d_a);
cudaFree(d_b);
#define sum_squares(x) (x*(x+1)*(2*x+1)/6)
printf("Doues GPU version equal CPU version: %.6g = %.6g\n", sum, 2*sum_squares((float)(N-1)));
// stop time
auto stopTime = std::chrono::high_resolution_clock::now();
PRINT((stopTime - startTime).count());
printf("Goodbye World\n");
}
|
2c3850a148dc7fe13ddfee2cd903826fb462973b.hip | // !!! This is a file automatically generated by hipify!!!
//This file sets the grid for network self interaction
#include "Bucket_Net.h"
#include "System.h"
#include "functor_neighbor.h"
#include "functor_bucket_indexer.h"
#include "function_extend.h"
//take domain and discretize into square buckets of size gridspace
void init_dim_general(
NodeInfoVecs& nodeInfoVecs,
PltInfoVecs& pltInfoVecs,
DomainParams& domainParams,
AuxVecs& auxVecs,
GeneralParams& generalParams) {
double minXTemp = (*(thrust::min_element(nodeInfoVecs.nodeLocX.begin(), nodeInfoVecs.nodeLocX.end())));
double maxXTemp = (*(thrust::max_element(nodeInfoVecs.nodeLocX.begin(), nodeInfoVecs.nodeLocX.end())));
double minYTemp = (*(thrust::min_element(nodeInfoVecs.nodeLocY.begin(), nodeInfoVecs.nodeLocY.end())));
double maxYTemp = (*(thrust::max_element(nodeInfoVecs.nodeLocY.begin(), nodeInfoVecs.nodeLocY.end())));
double minZTemp = (*(thrust::min_element(nodeInfoVecs.nodeLocZ.begin(), nodeInfoVecs.nodeLocZ.end())));
double maxZTemp = (*(thrust::max_element(nodeInfoVecs.nodeLocZ.begin(), nodeInfoVecs.nodeLocZ.end())));
//platelets
domainParams.pltminX = (*(thrust::min_element(pltInfoVecs.pltLocX.begin(), pltInfoVecs.pltLocX.end())));
domainParams.pltmaxX = (*(thrust::max_element(pltInfoVecs.pltLocX.begin(), pltInfoVecs.pltLocX.end())));
domainParams.pltminY = (*(thrust::min_element(pltInfoVecs.pltLocY.begin(), pltInfoVecs.pltLocY.end())));
domainParams.pltmaxY = (*(thrust::max_element(pltInfoVecs.pltLocY.begin(), pltInfoVecs.pltLocY.end())));
domainParams.pltminZ = (*(thrust::min_element(pltInfoVecs.pltLocZ.begin(), pltInfoVecs.pltLocZ.end())));
domainParams.pltmaxZ = (*(thrust::max_element(pltInfoVecs.pltLocZ.begin(), pltInfoVecs.pltLocZ.end())));
double space = 0.0;
domainParams.minX = min(minXTemp, domainParams.pltminX) - space;
domainParams.maxX = max(maxXTemp, domainParams.pltmaxX) + space;
domainParams.minY = min(minYTemp, domainParams.pltminY) - space;
domainParams.maxY = max(maxYTemp, domainParams.pltmaxY) + space;
domainParams.minZ = min(minZTemp, domainParams.pltminZ) - space;
domainParams.maxZ = max(maxZTemp, domainParams.pltmaxZ) + space;
};
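// Size the uniform bucket grid from the current domain extent and grid spacing,
// reallocating the per-bucket begin/end key ranges whenever the bucket count changes.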
void init_net_inct_bucket(
NodeInfoVecs& nodeInfoVecs,
PltInfoVecs& pltInfoVecs,
DomainParams& domainParams,
AuxVecs& auxVecs,
GeneralParams& generalParams) {
//always set bucket count. Update total if different.
domainParams.XBucketCount_net_intc = ceil((domainParams.maxX - domainParams.minX) / domainParams.gridSpacing_net_intc) + 1;
domainParams.YBucketCount_net_intc = ceil((domainParams.maxY - domainParams.minY) / domainParams.gridSpacing_net_intc) + 1;
domainParams.ZBucketCount_net_intc = ceil((domainParams.maxZ - domainParams.minZ) / domainParams.gridSpacing_net_intc) + 1;
if ( (domainParams.XBucketCount_net_intc * domainParams.YBucketCount_net_intc * domainParams.ZBucketCount_net_intc) != domainParams.totalBucketCount_net_intc ) {
std::cout<<"resetting grid for network interact" << std::endl;
std::cout<<"x-bucket: "<< domainParams.XBucketCount_net_intc<<std::endl;
std::cout<<"y-bucket: "<< domainParams.YBucketCount_net_intc<<std::endl;
std::cout<<"z-bucket: "<< domainParams.ZBucketCount_net_intc<<std::endl;
//recompute the total bucket count in case the network has resized
domainParams.totalBucketCount_net_intc = domainParams.XBucketCount_net_intc * domainParams.YBucketCount_net_intc * domainParams.ZBucketCount_net_intc;
std::cout<<"grid: "<< domainParams.gridSpacing_net_intc << std::endl;
std::cout<<"total bucket count: "<< domainParams.totalBucketCount_net_intc<<std::endl;
auxVecs.keyBegin_net_intc.resize(domainParams.totalBucketCount_net_intc);
auxVecs.keyEnd_net_intc.resize(domainParams.totalBucketCount_net_intc);
}
thrust::fill(auxVecs.keyBegin_net_intc.begin(),auxVecs.keyBegin_net_intc.end(),0);
thrust::fill(auxVecs.keyEnd_net_intc.begin(),auxVecs.keyEnd_net_intc.end(),0);
};
//convert buckets into neighboring scheme
void extend_net_inct_bucket(
NodeInfoVecs& nodeInfoVecs,
PltInfoVecs& pltInfoVecs,
DomainParams& domainParams,
AuxVecs& auxVecs,
GeneralParams& generalParams) {
//memory is already allocated.
unsigned endIndexExpanded = (auxVecs.endIndexBucketKeys_net_intc) * 27;
//test for removing copies.
unsigned valuesCount = auxVecs.id_value_net_intc.size();
thrust::fill(auxVecs.id_bucket_expanded_net_intc.begin(),auxVecs.id_bucket_expanded_net_intc.end(),0);
thrust::fill(auxVecs.id_value_expanded_net_intc.begin(),auxVecs.id_value_expanded_net_intc.end(),0);
/*
* beginning of constant iterator
*/
thrust::constant_iterator<unsigned> first(27);
/*
* end of constant iterator.
* the plus sign only indicate movement of position, not value.
* e.g. movement is 5 and first iterator is initialized as 9
* result array is [9,9,9,9,9];
*/
thrust::constant_iterator<unsigned> last = first + (auxVecs.endIndexBucketKeys_net_intc); // this is NOT numerical addition!
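// Replicate every (bucket id, node id) pair 27 times; each copy is then remapped to one of
// the 27 neighboring buckets so that, once sorted, a bucket's neighborhood is a contiguous range.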
expand(first, last,
thrust::make_zip_iterator(
thrust::make_tuple(
auxVecs.id_bucket_net_intc.begin(),
auxVecs.id_value_net_intc.begin())),
thrust::make_zip_iterator(
thrust::make_tuple(
auxVecs.id_bucket_expanded_net_intc.begin(),
auxVecs.id_value_expanded_net_intc.begin())));
thrust::counting_iterator<unsigned> countingBegin(0);
thrust::transform(
thrust::make_zip_iterator(
thrust::make_tuple(
auxVecs.id_bucket_expanded_net_intc.begin(),
countingBegin)),
thrust::make_zip_iterator(
thrust::make_tuple(
auxVecs.id_bucket_expanded_net_intc.begin(),
countingBegin)) + endIndexExpanded,
auxVecs.id_bucket_expanded_net_intc.begin(),
functor_neighbor(
domainParams.XBucketCount_net_intc,
domainParams.YBucketCount_net_intc,
domainParams.ZBucketCount_net_intc));
thrust::stable_sort_by_key(auxVecs.id_bucket_expanded_net_intc.begin(),
auxVecs.id_bucket_expanded_net_intc.end(),
auxVecs.id_value_expanded_net_intc.begin());
thrust::counting_iterator<unsigned> search_begin(0);
thrust::lower_bound(auxVecs.id_bucket_expanded_net_intc.begin(),
auxVecs.id_bucket_expanded_net_intc.end(), search_begin,
search_begin + domainParams.totalBucketCount_net_intc,
auxVecs.keyBegin_net_intc.begin());
thrust::upper_bound(auxVecs.id_bucket_expanded_net_intc.begin(),
auxVecs.id_bucket_expanded_net_intc.end(),search_begin,
search_begin + domainParams.totalBucketCount_net_intc,
auxVecs.keyEnd_net_intc.begin());
/*
unsigned choice = 0;
unsigned bucket = auxVecs.idPlt_bucket[choice];
std::cout<<"bucketplt 0: "<< bucket<<std::endl;
std::cout<<"plt pos: "<<pltInfoVecs.pltLocX[0]<<" "<<pltInfoVecs.pltLocY[0]<<" "<<pltInfoVecs.pltLocZ[0]<<std::endl;
std::cout<<"key len: "<< auxVecs.keyBegin.size() << std::endl;
unsigned begin = auxVecs.keyBegin[bucket];
unsigned end = auxVecs.keyEnd[bucket];
std::cout<<"from bucket scheme:"<<std::endl;
for (unsigned i = begin; i < end; i++) {
unsigned nbr = auxVecs.id_value_expanded[i];
unsigned buck = auxVecs.id_bucket[nbr];
double x_dist = pltInfoVecs.pltLocX[choice] - nodeInfoVecs.nodeLocX[nbr];
double y_dist = pltInfoVecs.pltLocY[choice] - nodeInfoVecs.nodeLocY[nbr];
double z_dist = pltInfoVecs.pltLocZ[choice] - nodeInfoVecs.nodeLocZ[nbr];
double dist = std::sqrt(::pow(x_dist,2.0)+::pow(y_dist,2.0)+::pow(z_dist,2.0));
if (dist < 1.0){
std::cout<<"dist: "<< dist<< " between: "<< choice << " and nbr: "<< nbr<<std::endl;
std::cout<<"nbr: "<< nbr<< " is in bucket: "<< buck <<std::endl;
}
}*/
/*
std::cout<<"from all plt:"<<std::endl;
for (unsigned i = 0; i < generalParams.maxNodeCount; i++) {
unsigned nbr = i;//auxVecs.id_value_expanded[i];
unsigned buck = auxVecs.id_bucket[nbr];
double x_dist = pltInfoVecs.pltLocX[choice] - nodeInfoVecs.nodeLocX[nbr];
double y_dist = pltInfoVecs.pltLocY[choice] - nodeInfoVecs.nodeLocY[nbr];
double z_dist = pltInfoVecs.pltLocZ[choice] - nodeInfoVecs.nodeLocZ[nbr];
double dist = std::sqrt(::pow(x_dist,2.0)+::pow(y_dist,2.0)+::pow(z_dist,2.0));
if (dist < 1.0){
std::cout<<"dist: "<< dist<< " between: "<< choice << " and nbr: "<< nbr<<std::endl;
std::cout<<"nbr: "<< nbr<< " is in bucket: "<< buck <<std::endl;
}
}*/
}
//takes nodes and places in buckets.
void build_net_inct_bucket(
NodeInfoVecs& nodeInfoVecs,
PltInfoVecs& pltInfoVecs,
DomainParams& domainParams,
AuxVecs& auxVecs,
GeneralParams& generalParams) {
thrust::counting_iterator<unsigned> indexBucketBegin(0);
// takes counting iterator and coordinates
// return tuple of keys and values
// transform the points to their bucket indices
thrust::transform(
thrust::make_zip_iterator(
thrust::make_tuple(
nodeInfoVecs.nodeLocX.begin(),
nodeInfoVecs.nodeLocY.begin(),
nodeInfoVecs.nodeLocZ.begin(),
indexBucketBegin)),
thrust::make_zip_iterator(
thrust::make_tuple(
nodeInfoVecs.nodeLocX.begin(),
nodeInfoVecs.nodeLocY.begin(),
nodeInfoVecs.nodeLocZ.begin(),
indexBucketBegin)) + generalParams.maxNodeCount,
thrust::make_zip_iterator(
thrust::make_tuple(
auxVecs.id_bucket_net_intc.begin(),
auxVecs.id_value_net_intc.begin())),
functor_bucket_indexer(
domainParams.minX, domainParams.maxX, domainParams.minY,
domainParams.maxY, domainParams.minZ, domainParams.maxZ,
domainParams.XBucketCount_net_intc,
domainParams.YBucketCount_net_intc,
domainParams.ZBucketCount_net_intc,
domainParams.gridSpacing_net_intc));
//test sorting by node instead of bucket index
thrust::sort_by_key(auxVecs.id_value_net_intc.begin(),
auxVecs.id_value_net_intc.begin() + generalParams.maxNodeCount,
auxVecs.id_bucket_net_intc.begin());
auxVecs.endIndexBucketKeys_net_intc = generalParams.maxNodeCount;
};
| 2c3850a148dc7fe13ddfee2cd903826fb462973b.cu | //This file sets the grid for network self interaction
#include "Bucket_Net.h"
#include "System.h"
#include "functor_neighbor.h"
#include "functor_bucket_indexer.h"
#include "function_extend.h"
//take domain and discretize into square buckets of size gridspace
void init_dim_general(
NodeInfoVecs& nodeInfoVecs,
PltInfoVecs& pltInfoVecs,
DomainParams& domainParams,
AuxVecs& auxVecs,
GeneralParams& generalParams) {
double minXTemp = (*(thrust::min_element(nodeInfoVecs.nodeLocX.begin(), nodeInfoVecs.nodeLocX.end())));
double maxXTemp = (*(thrust::max_element(nodeInfoVecs.nodeLocX.begin(), nodeInfoVecs.nodeLocX.end())));
double minYTemp = (*(thrust::min_element(nodeInfoVecs.nodeLocY.begin(), nodeInfoVecs.nodeLocY.end())));
double maxYTemp = (*(thrust::max_element(nodeInfoVecs.nodeLocY.begin(), nodeInfoVecs.nodeLocY.end())));
double minZTemp = (*(thrust::min_element(nodeInfoVecs.nodeLocZ.begin(), nodeInfoVecs.nodeLocZ.end())));
double maxZTemp = (*(thrust::max_element(nodeInfoVecs.nodeLocZ.begin(), nodeInfoVecs.nodeLocZ.end())));
//platelets
domainParams.pltminX = (*(thrust::min_element(pltInfoVecs.pltLocX.begin(), pltInfoVecs.pltLocX.end())));
domainParams.pltmaxX = (*(thrust::max_element(pltInfoVecs.pltLocX.begin(), pltInfoVecs.pltLocX.end())));
domainParams.pltminY = (*(thrust::min_element(pltInfoVecs.pltLocY.begin(), pltInfoVecs.pltLocY.end())));
domainParams.pltmaxY = (*(thrust::max_element(pltInfoVecs.pltLocY.begin(), pltInfoVecs.pltLocY.end())));
domainParams.pltminZ = (*(thrust::min_element(pltInfoVecs.pltLocZ.begin(), pltInfoVecs.pltLocZ.end())));
domainParams.pltmaxZ = (*(thrust::max_element(pltInfoVecs.pltLocZ.begin(), pltInfoVecs.pltLocZ.end())));
double space = 0.0;
domainParams.minX = min(minXTemp, domainParams.pltminX) - space;
domainParams.maxX = max(maxXTemp, domainParams.pltmaxX) + space;
domainParams.minY = min(minYTemp, domainParams.pltminY) - space;
domainParams.maxY = max(maxYTemp, domainParams.pltmaxY) + space;
domainParams.minZ = min(minZTemp, domainParams.pltminZ) - space;
domainParams.maxZ = max(maxZTemp, domainParams.pltmaxZ) + space;
};
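// Size the uniform bucket grid from the current domain extent and grid spacing,
// reallocating the per-bucket begin/end key ranges whenever the bucket count changes.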
void init_net_inct_bucket(
NodeInfoVecs& nodeInfoVecs,
PltInfoVecs& pltInfoVecs,
DomainParams& domainParams,
AuxVecs& auxVecs,
GeneralParams& generalParams) {
//always set bucket count. Update total if different.
domainParams.XBucketCount_net_intc = ceil((domainParams.maxX - domainParams.minX) / domainParams.gridSpacing_net_intc) + 1;
domainParams.YBucketCount_net_intc = ceil((domainParams.maxY - domainParams.minY) / domainParams.gridSpacing_net_intc) + 1;
domainParams.ZBucketCount_net_intc = ceil((domainParams.maxZ - domainParams.minZ) / domainParams.gridSpacing_net_intc) + 1;
if ( (domainParams.XBucketCount_net_intc * domainParams.YBucketCount_net_intc * domainParams.ZBucketCount_net_intc) != domainParams.totalBucketCount_net_intc ) {
std::cout<<"resetting grid for network interact" << std::endl;
std::cout<<"x-bucket: "<< domainParams.XBucketCount_net_intc<<std::endl;
std::cout<<"y-bucket: "<< domainParams.YBucketCount_net_intc<<std::endl;
std::cout<<"z-bucket: "<< domainParams.ZBucketCount_net_intc<<std::endl;
//recompute the total bucket count in case the network has resized
domainParams.totalBucketCount_net_intc = domainParams.XBucketCount_net_intc * domainParams.YBucketCount_net_intc * domainParams.ZBucketCount_net_intc;
std::cout<<"grid: "<< domainParams.gridSpacing_net_intc << std::endl;
std::cout<<"total bucket count: "<< domainParams.totalBucketCount_net_intc<<std::endl;
auxVecs.keyBegin_net_intc.resize(domainParams.totalBucketCount_net_intc);
auxVecs.keyEnd_net_intc.resize(domainParams.totalBucketCount_net_intc);
}
thrust::fill(auxVecs.keyBegin_net_intc.begin(),auxVecs.keyBegin_net_intc.end(),0);
thrust::fill(auxVecs.keyEnd_net_intc.begin(),auxVecs.keyEnd_net_intc.end(),0);
};
//convert buckets into neighboring scheme
void extend_net_inct_bucket(
NodeInfoVecs& nodeInfoVecs,
PltInfoVecs& pltInfoVecs,
DomainParams& domainParams,
AuxVecs& auxVecs,
GeneralParams& generalParams) {
//memory is already allocated.
unsigned endIndexExpanded = (auxVecs.endIndexBucketKeys_net_intc) * 27;
//test for removing copies.
unsigned valuesCount = auxVecs.id_value_net_intc.size();
thrust::fill(auxVecs.id_bucket_expanded_net_intc.begin(),auxVecs.id_bucket_expanded_net_intc.end(),0);
thrust::fill(auxVecs.id_value_expanded_net_intc.begin(),auxVecs.id_value_expanded_net_intc.end(),0);
/*
* beginning of constant iterator
*/
thrust::constant_iterator<unsigned> first(27);
/*
* end of constant iterator.
* the plus sign only indicate movement of position, not value.
* e.g. movement is 5 and first iterator is initialized as 9
* result array is [9,9,9,9,9];
*/
thrust::constant_iterator<unsigned> last = first + (auxVecs.endIndexBucketKeys_net_intc); // this is NOT numerical addition!
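// Replicate every (bucket id, node id) pair 27 times; each copy is then remapped to one of
// the 27 neighboring buckets so that, once sorted, a bucket's neighborhood is a contiguous range.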
expand(first, last,
thrust::make_zip_iterator(
thrust::make_tuple(
auxVecs.id_bucket_net_intc.begin(),
auxVecs.id_value_net_intc.begin())),
thrust::make_zip_iterator(
thrust::make_tuple(
auxVecs.id_bucket_expanded_net_intc.begin(),
auxVecs.id_value_expanded_net_intc.begin())));
thrust::counting_iterator<unsigned> countingBegin(0);
thrust::transform(
thrust::make_zip_iterator(
thrust::make_tuple(
auxVecs.id_bucket_expanded_net_intc.begin(),
countingBegin)),
thrust::make_zip_iterator(
thrust::make_tuple(
auxVecs.id_bucket_expanded_net_intc.begin(),
countingBegin)) + endIndexExpanded,
auxVecs.id_bucket_expanded_net_intc.begin(),
functor_neighbor(
domainParams.XBucketCount_net_intc,
domainParams.YBucketCount_net_intc,
domainParams.ZBucketCount_net_intc));
thrust::stable_sort_by_key(auxVecs.id_bucket_expanded_net_intc.begin(),
auxVecs.id_bucket_expanded_net_intc.end(),
auxVecs.id_value_expanded_net_intc.begin());
thrust::counting_iterator<unsigned> search_begin(0);
thrust::lower_bound(auxVecs.id_bucket_expanded_net_intc.begin(),
auxVecs.id_bucket_expanded_net_intc.end(), search_begin,
search_begin + domainParams.totalBucketCount_net_intc,
auxVecs.keyBegin_net_intc.begin());
thrust::upper_bound(auxVecs.id_bucket_expanded_net_intc.begin(),
auxVecs.id_bucket_expanded_net_intc.end(),search_begin,
search_begin + domainParams.totalBucketCount_net_intc,
auxVecs.keyEnd_net_intc.begin());
/*
unsigned choice = 0;
unsigned bucket = auxVecs.idPlt_bucket[choice];
std::cout<<"bucketplt 0: "<< bucket<<std::endl;
std::cout<<"plt pos: "<<pltInfoVecs.pltLocX[0]<<" "<<pltInfoVecs.pltLocY[0]<<" "<<pltInfoVecs.pltLocZ[0]<<std::endl;
std::cout<<"key len: "<< auxVecs.keyBegin.size() << std::endl;
unsigned begin = auxVecs.keyBegin[bucket];
unsigned end = auxVecs.keyEnd[bucket];
std::cout<<"from bucket scheme:"<<std::endl;
for (unsigned i = begin; i < end; i++) {
unsigned nbr = auxVecs.id_value_expanded[i];
unsigned buck = auxVecs.id_bucket[nbr];
double x_dist = pltInfoVecs.pltLocX[choice] - nodeInfoVecs.nodeLocX[nbr];
double y_dist = pltInfoVecs.pltLocY[choice] - nodeInfoVecs.nodeLocY[nbr];
double z_dist = pltInfoVecs.pltLocZ[choice] - nodeInfoVecs.nodeLocZ[nbr];
double dist = std::sqrt(std::pow(x_dist,2.0)+std::pow(y_dist,2.0)+std::pow(z_dist,2.0));
if (dist < 1.0){
std::cout<<"dist: "<< dist<< " between: "<< choice << " and nbr: "<< nbr<<std::endl;
std::cout<<"nbr: "<< nbr<< " is in bucket: "<< buck <<std::endl;
}
}*/
/*
std::cout<<"from all plt:"<<std::endl;
for (unsigned i = 0; i < generalParams.maxNodeCount; i++) {
unsigned nbr = i;//auxVecs.id_value_expanded[i];
unsigned buck = auxVecs.id_bucket[nbr];
double x_dist = pltInfoVecs.pltLocX[choice] - nodeInfoVecs.nodeLocX[nbr];
double y_dist = pltInfoVecs.pltLocY[choice] - nodeInfoVecs.nodeLocY[nbr];
double z_dist = pltInfoVecs.pltLocZ[choice] - nodeInfoVecs.nodeLocZ[nbr];
double dist = std::sqrt(std::pow(x_dist,2.0)+std::pow(y_dist,2.0)+std::pow(z_dist,2.0));
if (dist < 1.0){
std::cout<<"dist: "<< dist<< " between: "<< choice << " and nbr: "<< nbr<<std::endl;
std::cout<<"nbr: "<< nbr<< " is in bucket: "<< buck <<std::endl;
}
}*/
}
//takes nodes and places in buckets.
void build_net_inct_bucket(
NodeInfoVecs& nodeInfoVecs,
PltInfoVecs& pltInfoVecs,
DomainParams& domainParams,
AuxVecs& auxVecs,
GeneralParams& generalParams) {
thrust::counting_iterator<unsigned> indexBucketBegin(0);
// takes counting iterator and coordinates
// return tuple of keys and values
// transform the points to their bucket indices
thrust::transform(
thrust::make_zip_iterator(
thrust::make_tuple(
nodeInfoVecs.nodeLocX.begin(),
nodeInfoVecs.nodeLocY.begin(),
nodeInfoVecs.nodeLocZ.begin(),
indexBucketBegin)),
thrust::make_zip_iterator(
thrust::make_tuple(
nodeInfoVecs.nodeLocX.begin(),
nodeInfoVecs.nodeLocY.begin(),
nodeInfoVecs.nodeLocZ.begin(),
indexBucketBegin)) + generalParams.maxNodeCount,
thrust::make_zip_iterator(
thrust::make_tuple(
auxVecs.id_bucket_net_intc.begin(),
auxVecs.id_value_net_intc.begin())),
functor_bucket_indexer(
domainParams.minX, domainParams.maxX, domainParams.minY,
domainParams.maxY, domainParams.minZ, domainParams.maxZ,
domainParams.XBucketCount_net_intc,
domainParams.YBucketCount_net_intc,
domainParams.ZBucketCount_net_intc,
domainParams.gridSpacing_net_intc));
//test sorting by node instead of bucket index
thrust::sort_by_key(auxVecs.id_value_net_intc.begin(),
auxVecs.id_value_net_intc.begin() + generalParams.maxNodeCount,
auxVecs.id_bucket_net_intc.begin());
auxVecs.endIndexBucketKeys_net_intc = generalParams.maxNodeCount;
};
|
76d3044858f9277d75383f71c4cc9f5241dc25fc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/math/prelu.h"
#include "paddle/fluid/platform/bfloat16.h"
#include "paddle/fluid/platform/float16.h"
namespace paddle {
namespace operators {
namespace math {
#define CUDA_NUM_THREADS 1024
inline static int PADDLE_GET_BLOCKS(const int N) {
return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
}
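// Channel-first (NCHW) PReLU: the channel of a flattened element is (index / plane_size) % channel_num.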
template <typename T>
__global__ void PReluChannelFirstWiseKernel(const T *input,
const T *alpha,
T *output,
size_t channel_num,
size_t plane_size,
size_t numel) {
CUDA_KERNEL_LOOP(index, numel) {
size_t temp = index / plane_size;
size_t channel_index = temp % channel_num;
T scale = alpha[channel_index];
T x = input[index];
T zero = static_cast<T>(0);
output[index] = (x > zero) ? x : scale * x;
}
}
template <typename T>
__global__ void PReluChannelLastWiseKernel(const T *input,
const T *alpha,
T *output,
size_t channel_num,
size_t numel) {
CUDA_KERNEL_LOOP(index, numel) {
size_t channel_index = index % channel_num;
T scale = alpha[channel_index];
T x = input[index];
T zero = static_cast<T>(0);
output[index] = (x > zero) ? x : scale * x;
}
}
template <typename T>
__global__ void PReluElementWiseKernel(const T *input,
const T *alpha,
T *output,
size_t spatial_size,
size_t numel) {
CUDA_KERNEL_LOOP(index, numel) {
size_t element_index = index % spatial_size;
T scale = alpha[element_index];
T x = input[index];
T zero = static_cast<T>(0);
output[index] = (x > zero) ? x : scale * x;
}
}
template <typename T>
__global__ void PReluScalarKernel(const T *input,
const T *alpha,
T *output,
size_t numel) {
T scale = alpha[0];
CUDA_KERNEL_LOOP(index, numel) {
T x = input[index];
T zero = static_cast<T>(0);
output[index] = (x > zero) ? x : scale * x;
}
}
template <typename T>
void PreluChannelWiseDirectCUDAFunctor<T>::operator()(gpuStream_t stream,
const T *input,
const T *alpha,
T *output,
size_t batch_size,
size_t channel,
bool channel_last,
size_t numel) {
if (channel_last) {
hipLaunchKernelGGL(( PReluChannelLastWiseKernel), dim3(PADDLE_GET_BLOCKS(numel)),
dim3(CUDA_NUM_THREADS),
0,
stream,
input, alpha, output, channel, numel);
} else {
hipLaunchKernelGGL(( PReluChannelFirstWiseKernel), dim3(PADDLE_GET_BLOCKS(numel)),
dim3(CUDA_NUM_THREADS),
0,
stream,
input, alpha, output, channel, numel / batch_size / channel, numel);
}
}
template <typename T>
void PreluElementWiseDirectCUDAFunctor<T>::operator()(gpuStream_t stream,
const T *input,
const T *alpha,
T *output,
size_t batch_size,
size_t numel) {
hipLaunchKernelGGL(( PReluElementWiseKernel), dim3(PADDLE_GET_BLOCKS(numel)),
dim3(CUDA_NUM_THREADS),
0,
stream,
input, alpha, output, numel / batch_size, numel);
}
template <typename T>
void PreluScalarDirectCUDAFunctor<T>::operator()(gpuStream_t stream,
const T *input,
const T *alpha,
T *output,
size_t numel) {
hipLaunchKernelGGL(( PReluScalarKernel), dim3(PADDLE_GET_BLOCKS(numel)), dim3(CUDA_NUM_THREADS), 0, stream,
input, alpha, output, numel);
}
template class PreluChannelWiseDirectCUDAFunctor<float>;
template class PreluChannelWiseDirectCUDAFunctor<platform::float16>;
template class PreluChannelWiseDirectCUDAFunctor<platform::bfloat16>;
template class PreluChannelWiseDirectCUDAFunctor<double>;
template class PreluElementWiseDirectCUDAFunctor<float>;
template class PreluElementWiseDirectCUDAFunctor<platform::float16>;
template class PreluElementWiseDirectCUDAFunctor<platform::bfloat16>;
template class PreluElementWiseDirectCUDAFunctor<double>;
template class PreluScalarDirectCUDAFunctor<float>;
template class PreluScalarDirectCUDAFunctor<platform::float16>;
template class PreluScalarDirectCUDAFunctor<platform::bfloat16>;
template class PreluScalarDirectCUDAFunctor<double>;
} // namespace math
} // namespace operators
} // namespace paddle
| 76d3044858f9277d75383f71c4cc9f5241dc25fc.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/math/prelu.h"
#include "paddle/fluid/platform/bfloat16.h"
#include "paddle/fluid/platform/float16.h"
namespace paddle {
namespace operators {
namespace math {
#define CUDA_NUM_THREADS 1024
inline static int PADDLE_GET_BLOCKS(const int N) {
return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
}
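// Channel-first (NCHW) PReLU: the channel of a flattened element is (index / plane_size) % channel_num.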
template <typename T>
__global__ void PReluChannelFirstWiseKernel(const T *input,
const T *alpha,
T *output,
size_t channel_num,
size_t plane_size,
size_t numel) {
CUDA_KERNEL_LOOP(index, numel) {
size_t temp = index / plane_size;
size_t channel_index = temp % channel_num;
T scale = alpha[channel_index];
T x = input[index];
T zero = static_cast<T>(0);
output[index] = (x > zero) ? x : scale * x;
}
}
template <typename T>
__global__ void PReluChannelLastWiseKernel(const T *input,
const T *alpha,
T *output,
size_t channel_num,
size_t numel) {
CUDA_KERNEL_LOOP(index, numel) {
size_t channel_index = index % channel_num;
T scale = alpha[channel_index];
T x = input[index];
T zero = static_cast<T>(0);
output[index] = (x > zero) ? x : scale * x;
}
}
template <typename T>
__global__ void PReluElementWiseKernel(const T *input,
const T *alpha,
T *output,
size_t spatial_size,
size_t numel) {
CUDA_KERNEL_LOOP(index, numel) {
size_t element_index = index % spatial_size;
T scale = alpha[element_index];
T x = input[index];
T zero = static_cast<T>(0);
output[index] = (x > zero) ? x : scale * x;
}
}
template <typename T>
__global__ void PReluScalarKernel(const T *input,
const T *alpha,
T *output,
size_t numel) {
T scale = alpha[0];
CUDA_KERNEL_LOOP(index, numel) {
T x = input[index];
T zero = static_cast<T>(0);
output[index] = (x > zero) ? x : scale * x;
}
}
template <typename T>
void PreluChannelWiseDirectCUDAFunctor<T>::operator()(gpuStream_t stream,
const T *input,
const T *alpha,
T *output,
size_t batch_size,
size_t channel,
bool channel_last,
size_t numel) {
if (channel_last) {
PReluChannelLastWiseKernel<<<PADDLE_GET_BLOCKS(numel),
CUDA_NUM_THREADS,
0,
stream>>>(
input, alpha, output, channel, numel);
} else {
PReluChannelFirstWiseKernel<<<PADDLE_GET_BLOCKS(numel),
CUDA_NUM_THREADS,
0,
stream>>>(
input, alpha, output, channel, numel / batch_size / channel, numel);
}
}
template <typename T>
void PreluElementWiseDirectCUDAFunctor<T>::operator()(gpuStream_t stream,
const T *input,
const T *alpha,
T *output,
size_t batch_size,
size_t numel) {
PReluElementWiseKernel<<<PADDLE_GET_BLOCKS(numel),
CUDA_NUM_THREADS,
0,
stream>>>(
input, alpha, output, numel / batch_size, numel);
}
template <typename T>
void PreluScalarDirectCUDAFunctor<T>::operator()(gpuStream_t stream,
const T *input,
const T *alpha,
T *output,
size_t numel) {
PReluScalarKernel<<<PADDLE_GET_BLOCKS(numel), CUDA_NUM_THREADS, 0, stream>>>(
input, alpha, output, numel);
}
template class PreluChannelWiseDirectCUDAFunctor<float>;
template class PreluChannelWiseDirectCUDAFunctor<platform::float16>;
template class PreluChannelWiseDirectCUDAFunctor<platform::bfloat16>;
template class PreluChannelWiseDirectCUDAFunctor<double>;
template class PreluElementWiseDirectCUDAFunctor<float>;
template class PreluElementWiseDirectCUDAFunctor<platform::float16>;
template class PreluElementWiseDirectCUDAFunctor<platform::bfloat16>;
template class PreluElementWiseDirectCUDAFunctor<double>;
template class PreluScalarDirectCUDAFunctor<float>;
template class PreluScalarDirectCUDAFunctor<platform::float16>;
template class PreluScalarDirectCUDAFunctor<platform::bfloat16>;
template class PreluScalarDirectCUDAFunctor<double>;
} // namespace math
} // namespace operators
} // namespace paddle
|
d63e4488dac785d680264adeecf84eef88326b6e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <cstdio>
#include <chrono>
typedef std::chrono::high_resolution_clock Clock;
#define ITER 65536
// CPU version of the vector add function
void vector_add_cpu(int *a, int *b, int *c, int n) {
int i;
// Add the vector elements a and b to the vector c
for (i = 0; i < n; ++i) {
c[i] = a[i] + b[i];
}
}
// GPU version of the vector add function
__global__ void vector_add_gpu(int *gpu_a, int *gpu_b, int *gpu_c, int n) {
int idx = blockIdx.x*blockDim.x + threadIdx.x;
// No for loop needed because the CUDA runtime
// launches one thread per element of the vectors
if (idx < n) {
gpu_c[idx] = gpu_a[idx] + gpu_b[idx];
}
}
int main() {
int *a, *b, *c, *gpu_r;
int *gpu_a, *gpu_b, *gpu_c;
a = (int *)malloc(ITER * sizeof(int));
b = (int *)malloc(ITER * sizeof(int));
c = (int *)malloc(ITER * sizeof(int));
gpu_r = (int *)malloc(ITER * sizeof(int));
// We need buffers accessible to the GPU,
// so hipMalloc allocates these on the device
hipMalloc((void**)&gpu_a, ITER * sizeof(int));
hipMalloc((void**)&gpu_b, ITER * sizeof(int));
hipMalloc((void**)&gpu_c, ITER * sizeof(int));
for (int i = 0; i < ITER; ++i) {
a[i] = i;
b[i] = i;
c[i] = i;
gpu_r[i] = i;
}
// Call the CPU function and time it
auto cpu_start = Clock::now();
vector_add_cpu(a, b, c, ITER);
auto cpu_end = Clock::now();
std::cout << "vector_add_cpu: "
<< std::chrono::duration_cast<std::chrono::nanoseconds>(cpu_end - cpu_start).count()
<< " nanoseconds.\n";
for(int i=0;i<10;i++)
std::cout << "vector_add_cpu : " << c[i] << " ";
std::cout<<"\n";
/*
for(int i=0;i<10;i++)
std::cout << "result : " << result[i] << " ";
std::cout<<"\n";
*/
// Call the GPU function and time it
// The triple angle brackets are a CUDA runtime extension that allows
// the launch configuration of a CUDA kernel call to be specified.
// In this example, we launch ITER/256 thread blocks of 256 threads each.
//hipMemcpy(void* dst, const void* src, size_t count, hipMemcpyHostToDevice/hipMemcpyDeviceToHost);
hipMemcpy(gpu_a, a, ITER * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(gpu_b, b, ITER * sizeof(int), hipMemcpyHostToDevice);
auto gpu_start = Clock::now();
//vector_add_gpu <<<2, ITER/2>>> (gpu_a, gpu_b, gpu_c, ITER);
hipLaunchKernelGGL(( vector_add_gpu) , dim3(ITER/256), dim3(256), 0, 0, gpu_a, gpu_b, gpu_c, ITER);
hipDeviceSynchronize();
auto gpu_end = Clock::now();
std::cout << "vector_add_gpu: "
<< std::chrono::duration_cast<std::chrono::nanoseconds>(gpu_end - gpu_start).count()
<< " nanoseconds.\n";
/*
for(int i=0;i<10;i++)
std::cout << "vector_add_gpu : " << gpu_r[i] << " ";
std::cout<<"\n";
*/
hipMemcpy(gpu_r, gpu_c, ITER * sizeof(int), hipMemcpyDeviceToHost);
std::cout<<"result of gpu_c"<<std::endl;
for(int i=0;i<10;i++)
std::cout << "vector_add_gpu : " << gpu_r[i] << " ";
std::cout<<"\n";
//Free the GPU-function based memory allocations
hipFree(gpu_a);
hipFree(gpu_b);
hipFree(gpu_c);
// Free the CPU-function based memory allocations
free(a);
free(b);
free(c);
free(gpu_r);
/*
int InputData[5] = {1, 2, 3, 4, 5};
int OutputData[5] = {0};
int* GraphicsCard_memory;
// Allocate graphics card memory
hipMalloc((void**)&GraphicsCard_memory, 5*sizeof(int));
// Copy data from the PC to the graphics card
hipMemcpy(GraphicsCard_memory, InputData, 5*sizeof(int), hipMemcpyHostToDevice);
// Copy data from the graphics card back to the PC
hipMemcpy(OutputData, GraphicsCard_memory, 5*sizeof(int), hipMemcpyDeviceToHost);
// Print the results
for( int i = 0; i < 5; i++)
{
printf(" OutputData[%d] : %d\n", i, OutputData[i]);
}
// Free the graphics card memory
hipFree(GraphicsCard_memory);
*/
return 0;
}
| d63e4488dac785d680264adeecf84eef88326b6e.cu | #include "cuda_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <cstdio>
#include <chrono>
typedef std::chrono::high_resolution_clock Clock;
#define ITER 65536
// CPU version of the vector add function
void vector_add_cpu(int *a, int *b, int *c, int n) {
int i;
// Add the vector elements a and b to the vector c
for (i = 0; i < n; ++i) {
c[i] = a[i] + b[i];
}
}
// GPU version of the vector add function
__global__ void vector_add_gpu(int *gpu_a, int *gpu_b, int *gpu_c, int n) {
int idx = blockIdx.x*blockDim.x + threadIdx.x;
// No for loop needed because the CUDA runtime
// launches one thread per element of the vectors
if (idx < n) {
gpu_c[idx] = gpu_a[idx] + gpu_b[idx];
}
}
int main() {
int *a, *b, *c, *gpu_r;
int *gpu_a, *gpu_b, *gpu_c;
a = (int *)malloc(ITER * sizeof(int));
b = (int *)malloc(ITER * sizeof(int));
c = (int *)malloc(ITER * sizeof(int));
gpu_r = (int *)malloc(ITER * sizeof(int));
// We need buffers accessible to the GPU,
// so cudaMalloc allocates these on the device
cudaMalloc((void**)&gpu_a, ITER * sizeof(int));
cudaMalloc((void**)&gpu_b, ITER * sizeof(int));
cudaMalloc((void**)&gpu_c, ITER * sizeof(int));
for (int i = 0; i < ITER; ++i) {
a[i] = i;
b[i] = i;
c[i] = i;
gpu_r[i] = i;
}
// Call the CPU function and time it
auto cpu_start = Clock::now();
vector_add_cpu(a, b, c, ITER);
auto cpu_end = Clock::now();
std::cout << "vector_add_cpu: "
<< std::chrono::duration_cast<std::chrono::nanoseconds>(cpu_end - cpu_start).count()
<< " nanoseconds.\n";
for(int i=0;i<10;i++)
std::cout << "vector_add_cpu : " << c[i] << " ";
std::cout<<"\n";
/*
for(int i=0;i<10;i++)
std::cout << "result : " << result[i] << " ";
std::cout<<"\n";
*/
// Call the GPU function and time it
// The triple angle brackets are a CUDA runtime extension that allows
// the launch configuration of a CUDA kernel call to be specified.
// In this example, we launch ITER/256 thread blocks of 256 threads each.
//cudaMemcpy(void* dst, const void* src, size_t count, cudaMemcpyHostToDevice/cudaMemcpyDeviceToHost);
cudaMemcpy(gpu_a, a, ITER * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(gpu_b, b, ITER * sizeof(int), cudaMemcpyHostToDevice);
auto gpu_start = Clock::now();
//vector_add_gpu <<<2, ITER/2>>> (gpu_a, gpu_b, gpu_c, ITER);
vector_add_gpu <<<ITER/256, 256>>> (gpu_a, gpu_b, gpu_c, ITER);
cudaDeviceSynchronize();
auto gpu_end = Clock::now();
std::cout << "vector_add_gpu: "
<< std::chrono::duration_cast<std::chrono::nanoseconds>(gpu_end - gpu_start).count()
<< " nanoseconds.\n";
/*
for(int i=0;i<10;i++)
std::cout << "vector_add_gpu : " << gpu_r[i] << " ";
std::cout<<"\n";
*/
cudaMemcpy(gpu_r, gpu_c, ITER * sizeof(int), cudaMemcpyDeviceToHost);
std::cout<<"result of gpu_c"<<std::endl;
for(int i=0;i<10;i++)
std::cout << "vector_add_gpu : " << gpu_r[i] << " ";
std::cout<<"\n";
//Free the GPU-function based memory allocations
cudaFree(gpu_a);
cudaFree(gpu_b);
cudaFree(gpu_c);
// Free the CPU-function based memory allocations
free(a);
free(b);
free(c);
free(gpu_r);
/*
int InputData[5] = {1, 2, 3, 4, 5};
int OutputData[5] = {0};
int* GraphicsCard_memory;
// Allocate graphics card memory
cudaMalloc((void**)&GraphicsCard_memory, 5*sizeof(int));
// Copy data from the PC to the graphics card
cudaMemcpy(GraphicsCard_memory, InputData, 5*sizeof(int), cudaMemcpyHostToDevice);
// Copy data from the graphics card back to the PC
cudaMemcpy(OutputData, GraphicsCard_memory, 5*sizeof(int), cudaMemcpyDeviceToHost);
// Print the results
for( int i = 0; i < 5; i++)
{
printf(" OutputData[%d] : %d\n", i, OutputData[i]);
}
// Free the graphics card memory
cudaFree(GraphicsCard_memory);
*/
return 0;
}
|
5332bc074fbdc327fac3985a56188b45e9f4ead9.hip | // !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/Config.h>
#include <ATen/Dispatch.h>
#include <ATen/NamedTensorUtils.h>
#include <ATen/Parallel.h>
#include <ATen/SparseTensorImpl.h>
#include <ATen/SparseTensorUtils.h>
#include <ATen/native/Resize.h>
#include <hip/hip_runtime.h>
#include <type_traits>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/_sparse_sparse_matmul_native.h>
#include <ATen/ops/empty.h>
#include <ATen/ops/empty_like_native.h>
#endif
#include <thrust/device_ptr.h>
#include <thrust/for_each.h>
#include <thrust/sequence.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/HIPDataType.h>
#include <ATen/hip/HIPUtils.h>
#include <ATen/hip/ThrustAllocator.h>
#include <hipsparse.h>
#include <ATen/native/sparse/hip/SparseHIPBlas.h>
#include <ATen/hip/impl/HIPCachingAllocatorMasqueradingAsCUDA.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/functional.h>
#include <thrust/binary_search.h>
#include <thrust/execution_policy.h>
#include <thrust/iterator/discard_iterator.h>
#if defined(__HIPCC__) && (CUSPARSE_VERSION >= 11000)
#define IS_CUSPARSE11_AVAILABLE() 1
#else
#define IS_CUSPARSE11_AVAILABLE() 0
#endif
#if IS_CUSPARSE11_AVAILABLE()
#include <hip/library_types.h>
#endif
namespace at {
namespace native {
namespace {
using namespace at::sparse;
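// Convert int64 COO row indices to int32 and compress them into a CSR row-pointer array of length dim + 1.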
Tensor _to_csr_int(const Tensor& rowIndices, int64_t dim, int64_t nnz) {
Tensor csr = at::empty({dim + 1}, CUDA(kInt));
Tensor rowIndicesInt = at::empty({rowIndices.size(0)}, CUDA(kInt));
rowIndicesInt.copy_(rowIndices);
sparse::cuda::Xcoo2csr(
rowIndicesInt.data_ptr<int32_t>(), nnz, dim, csr.data_ptr<int32_t>());
return csr;
}
#pragma push
// NVCC complains that confirm_mult_size is not used,
// but it is used in specializations of CusparseMatrixMultiplyOp below
#pragma diag_suppress 177 // Function was declared but never referenced
int confirm_mult_size(const std::vector<int>& mat1_size, const std::vector<int>& mat2_size) {
TORCH_CHECK(
mat1_size[1] == mat2_size[0],
"mat1 and mat2 shapes cannot be multiplied (",
mat1_size[0],
"x",
mat1_size[1],
" and ",
mat2_size[0],
"x",
mat2_size[1],
")");
return mat1_size[1];
}
#pragma pop
void create_general_description_(hipsparseMatDescr_t& description_) {
TORCH_CUDASPARSE_CHECK(hipsparseCreateMatDescr(&description_));
TORCH_CUDASPARSE_CHECK(hipsparseSetMatType(description_, HIPSPARSE_MATRIX_TYPE_GENERAL));
TORCH_CUDASPARSE_CHECK(hipsparseSetMatIndexBase(description_, HIPSPARSE_INDEX_BASE_ZERO));
}
// csrMatrixRef is used to hold a view of a raw CSR matrix
// coming from the `sparse_sparse_matmul_cuda_kernel` function.
// Moreover this implements a RAII guard for a cusparse descriptor
template<class scalar_t>
struct csrMatrixRef {
int* csr_indices_{nullptr};
int* csr_pointers_{nullptr};
scalar_t* csr_values_{nullptr};
int nnz_{0};
std::vector<int> size_{};
#if IS_CUSPARSE11_AVAILABLE()
hipsparseSpMatDescr_t description_{0};
#else
hipsparseMatDescr_t description_{0};
#endif
csrMatrixRef() {
#if !IS_CUSPARSE11_AVAILABLE()
create_general_description_(description_);
#endif
}
csrMatrixRef(
int* csr_indices,
int* csr_pointers,
scalar_t* csr_values,
int nnz,
const std::vector<int>& size)
: csr_indices_{csr_indices},
csr_pointers_{csr_pointers},
csr_values_{csr_values},
nnz_{nnz},
size_{size} {
#if IS_CUSPARSE11_AVAILABLE()
hipDataType cuda_data_type = at::cuda::getCudaDataType<scalar_t>();
TORCH_CUDASPARSE_CHECK(hipsparseCreateCsr(
&description_,
this->size(0),
this->size(1),
this->nnz_,
this->csr_pointers_,
this->csr_indices_,
this->csr_values_,
HIPSPARSE_INDEX_32I,
HIPSPARSE_INDEX_32I,
HIPSPARSE_INDEX_BASE_ZERO,
cuda_data_type));
#else
create_general_description_(description_);
#endif
}
~csrMatrixRef() {
#if IS_CUSPARSE11_AVAILABLE()
hipsparseDestroySpMat(description_);
#else
hipsparseDestroyMatDescr(description_);
#endif
}
int size(int index) const {
return size_.at(index);
}
};
// csrOutput is used to represent the output for `CusparseMatrixMultiplyOp`
// Note that `csrOutput` is different from `csrMatrixRef` and the purpose
// of this was to have a materialized version of a CSR matrix.
// Moreover this implements a RAII guard for a cusparse descriptor
struct csrOutput {
Tensor csr_indices_{};
Tensor csr_pointers_{};
at::Tensor csr_values_{};
int nnz_{0};
std::vector<int> size_;
hipsparseMatDescr_t description_{0};
csrOutput(const std::vector<int> &size) : size_{size} {
create_general_description_(description_);
}
~csrOutput() {
hipsparseDestroyMatDescr(description_);
}
int size(int index) const {
return size_.at(index);
}
};
#if IS_CUSPARSE11_AVAILABLE()
// RAII guard helps to support cuSparse 11 API for `A @ B` operation
// This generic template exists because with cuSparse the `scalar_t` type could be a double or float
template <class scalar_t>
struct CusparseMatrixMultiplyOp {
hipsparseSpGEMMDescr_t spgemmDesc;
CusparseMatrixMultiplyOp() {
static_assert(
std::is_same<c10::Half, scalar_t>::value ||
std::is_same<c10::BFloat16, scalar_t>::value ||
std::is_same<float, scalar_t>::value ||
std::is_same<double, scalar_t>::value ||
std::is_same<c10::complex<float>, scalar_t>::value ||
std::is_same<c10::complex<double>, scalar_t>::value,
"cusparseSpGEMM only supports data type of half, bfloat16, float, double and complex float, double.");
// SpGEMM Computation
TORCH_CUDASPARSE_CHECK(hipsparseSpGEMM_createDescr(&spgemmDesc));
}
~CusparseMatrixMultiplyOp() {
// destroy matrix/vector descriptors
hipsparseSpGEMM_destroyDescr(spgemmDesc);
}
csrOutput operator ()(
const csrMatrixRef<scalar_t>& A,
const csrMatrixRef<scalar_t>& B,
Tensor& output_values,
Tensor& output_indices) {
const int A_num_rows = A.size(0);
const int B_num_cols = B.size(1);
csrOutput out({A.size(0), B.size(1)});
out.csr_pointers_ = at::empty({out.size(0) + 1}, output_indices.options().dtype(kInt));
int* dC_csrOffsets = out.csr_pointers_.data_ptr<int>();
int* dC_columns = nullptr;
scalar_t* dC_values = nullptr;
scalar_t alpha = 1.0f;
scalar_t beta = 0.0f;
hipsparseOperation_t opA = HIPSPARSE_OPERATION_NON_TRANSPOSE;
hipsparseOperation_t opB = HIPSPARSE_OPERATION_NON_TRANSPOSE;
csrMatrixRef<scalar_t> C(
nullptr,
nullptr,
nullptr,
/*nnz*/0,
{A_num_rows, B_num_cols}
);
//--------------------------------------------------------------------------
// CUSPARSE APIs
hipsparseHandle_t handle = at::cuda::getCurrentCUDASparseHandle();
void *dBuffer1 = NULL, *dBuffer2 = NULL;
size_t bufferSize1 = 0, bufferSize2 = 0;
hipsparseSpMatDescr_t matA = A.description_;
hipsparseSpMatDescr_t matB = B.description_;
hipsparseSpMatDescr_t matC = C.description_;
//--------------------------------------------------------------------------
hipDataType computeType = at::cuda::getCudaDataType<scalar_t>();
// If a specific GPU model does not provide native support for a given data type,
// the routine returns HIPSPARSE_STATUS_ARCH_MISMATCH error
hipDeviceProp_t* prop = at::cuda::getCurrentDeviceProperties();
TORCH_CHECK(prop->major >= 5 && !((10*prop->major + prop->minor) < 53 && computeType == HIP_R_16F),
"sparse_mm: CUDA Float16 requires compute capability >= 53 (current: ", prop->major, prop->minor, ")");
TORCH_CHECK(!(prop->major < 8 && computeType == CUDA_R_16BF),
"sparse_mm: CUDA BFloat16 requires compute capability >= 80 (current: ", prop->major, prop->minor, ")");
// ask bufferSize1 bytes for external memory
TORCH_CUDASPARSE_CHECK(hipsparseSpGEMM_workEstimation(
handle,
opA,
opB,
&alpha,
matA,
matB,
&beta,
matC,
computeType,
HIPSPARSE_SPGEMM_DEFAULT,
spgemmDesc,
&bufferSize1,
NULL));
auto& allocator = *::c10::hip::HIPCachingAllocatorMasqueradingAsCUDA::get();
at::DataPtr dataPtr1 = allocator.allocate(bufferSize1);
dBuffer1 = dataPtr1.get();
// inspect the matrices A and B to understand the memory requirement for
// the next step
TORCH_CUDASPARSE_CHECK(hipsparseSpGEMM_workEstimation(
handle,
opA,
opB,
&alpha,
matA,
matB,
&beta,
matC,
computeType,
HIPSPARSE_SPGEMM_DEFAULT,
spgemmDesc,
&bufferSize1,
dBuffer1));
// ask bufferSize2 bytes for external memory
TORCH_CUDASPARSE_CHECK(hipsparseSpGEMM_compute(
handle,
opA,
opB,
&alpha,
matA,
matB,
&beta,
matC,
computeType,
HIPSPARSE_SPGEMM_DEFAULT,
spgemmDesc,
&bufferSize2,
NULL));
at::DataPtr dataPtr2 = allocator.allocate(bufferSize2);
dBuffer2 = dataPtr2.get();
// compute the intermediate product of A * B
TORCH_CUDASPARSE_CHECK(hipsparseSpGEMM_compute(
handle,
opA,
opB,
&alpha,
matA,
matB,
&beta,
matC,
computeType,
HIPSPARSE_SPGEMM_DEFAULT,
spgemmDesc,
&bufferSize2,
dBuffer2));
// get matrix C non-zero entries C_num_nnz1
int64_t C_num_rows1, C_num_cols1, C_num_nnz1;
TORCH_CUDASPARSE_CHECK(
hipsparseSpMatGetSize(matC, &C_num_rows1, &C_num_cols1, &C_num_nnz1));
// allocate matrix C
// allocate C offsets
out.nnz_ = C_num_nnz1;
out.csr_indices_ = at::empty({out.nnz_}, output_indices.options().dtype(kInt));
out.csr_values_ = at::empty({out.nnz_}, output_values.options());
dC_columns = out.csr_indices_.data_ptr<int>();
dC_values = out.csr_values_.data_ptr<scalar_t>();
// update matC with the new pointers
TORCH_CUDASPARSE_CHECK(
hipsparseCsrSetPointers(matC, dC_csrOffsets, dC_columns, dC_values));
// copy the final products to the matrix C
TORCH_CUDASPARSE_CHECK(hipsparseSpGEMM_copy(
handle,
opA,
opB,
&alpha,
matA,
matB,
&beta,
matC,
computeType,
HIPSPARSE_SPGEMM_DEFAULT,
spgemmDesc));
return out;
}
};
template struct CusparseMatrixMultiplyOp<float>;
template struct CusparseMatrixMultiplyOp<double>;
#else // if not IS_CUSPARSE11_AVAILABLE()
using DcsrMatrixRef = csrMatrixRef<double>;
using ScsrMatrixRef = csrMatrixRef<float>;
// RAII guard helps to support cuSparse 10 API for `A @ B` operation
// This generic template exists because with cuSparse the `scalar_t` type could be a double or float
template <class scalar_t>
struct CusparseMatrixMultiplyOp {
csrOutput operator()(
const csrMatrixRef<scalar_t>& lhs,
const csrMatrixRef<scalar_t>& rhs,
Tensor &output_values,
Tensor &output_indices)
{
TORCH_INTERNAL_ASSERT(false, "cusparse csr sparse-sparse MM only supports data type of float and double.");
}
};
// Specialization for `A @ B` operation for double values with cuSparse
template<> struct CusparseMatrixMultiplyOp<double> {
csrgemm2Info_t gemm2Info_;
CusparseMatrixMultiplyOp() {
TORCH_CUDASPARSE_CHECK(hipsparseCreateCsrgemm2Info(&gemm2Info_));
}
~CusparseMatrixMultiplyOp() {
hipsparseDestroyCsrgemm2Info(gemm2Info_);
}
csrOutput operator ()(
const DcsrMatrixRef& lhs,
const DcsrMatrixRef& rhs,
Tensor &output_values,
Tensor &output_indices) {
double alpha = 1.0;
DcsrMatrixRef empty;
return Dgemm2(lhs, rhs, empty, &alpha, nullptr, output_values, output_indices);
}
csrOutput Dgemm2(
const DcsrMatrixRef& A,
const DcsrMatrixRef& B,
const DcsrMatrixRef& C,
const double* alpha,
const double* beta,
Tensor &output_values,
Tensor &output_indices) {
void* buffer_{nullptr};
hipsparseHandle_t cusparseHandle_ = at::cuda::getCurrentCUDASparseHandle();
TORCH_CUDASPARSE_CHECK(hipsparseSetPointerMode(cusparseHandle_, HIPSPARSE_POINTER_MODE_HOST));
csrOutput out({A.size(0), B.size(1)});
int innerSize = confirm_mult_size(A.size_, B.size_);
out.csr_pointers_ = at::empty({out.size(0) + 1}, output_indices.options().dtype(kInt));
// Compute needed buffer size
size_t new_bubber_sz;
TORCH_CUDASPARSE_CHECK(hipsparseDcsrgemm2_bufferSizeExt(
cusparseHandle_,
out.size(0),
out.size(1),
innerSize,
alpha,
A.description_,
A.nnz_,
A.csr_pointers_,
A.csr_indices_,
B.description_,
B.nnz_,
B.csr_pointers_,
B.csr_indices_,
beta,
C.description_,
C.nnz_,
C.csr_pointers_,
C.csr_indices_,
gemm2Info_,
&new_bubber_sz));
// (Re)allocate buffer if needed
auto& allocator = *::c10::hip::HIPCachingAllocatorMasqueradingAsCUDA::get();
at::DataPtr data_ptr = allocator.allocate(new_bubber_sz);
buffer_ = data_ptr.get();
// Find the resulting non-zero pattern.
TORCH_CUDASPARSE_CHECK(hipsparseXcsrgemm2Nnz(
cusparseHandle_,
out.size(0),
out.size(1),
innerSize,
A.description_,
A.nnz_,
A.csr_pointers_,
A.csr_indices_,
B.description_,
B.nnz_,
B.csr_pointers_,
B.csr_indices_,
C.description_,
C.nnz_,
C.csr_pointers_,
C.csr_indices_,
out.description_,
out.csr_pointers_.data_ptr<int>(),
&out.nnz_,
gemm2Info_,
buffer_));
out.csr_indices_ = at::empty({out.nnz_}, output_indices.options().dtype(kInt));
out.csr_values_ = at::empty({out.nnz_}, output_values.options());
// Perform the gemm2 operation for doubles
// out = alpha A B + beta C
TORCH_CUDASPARSE_CHECK(hipsparseDcsrgemm2(
cusparseHandle_,
out.size(0),
out.size(1),
innerSize,
alpha,
A.description_,
A.nnz_,
A.csr_values_,
A.csr_pointers_,
A.csr_indices_,
B.description_,
B.nnz_,
B.csr_values_,
B.csr_pointers_,
B.csr_indices_,
beta,
C.description_,
C.nnz_,
C.csr_values_,
C.csr_pointers_,
C.csr_indices_,
out.description_,
out.csr_values_.data_ptr<double>(),
out.csr_pointers_.data_ptr<int>(),
out.csr_indices_.data_ptr<int>(),
gemm2Info_,
buffer_));
return out;
}
};
// Specialization for `A @ B` operation for float values with cuSparse
template<> struct CusparseMatrixMultiplyOp<float> {
csrgemm2Info_t gemm2Info_;
CusparseMatrixMultiplyOp() {
TORCH_CUDASPARSE_CHECK(hipsparseCreateCsrgemm2Info(&gemm2Info_));
}
~CusparseMatrixMultiplyOp() {
hipsparseDestroyCsrgemm2Info(gemm2Info_);
}
csrOutput operator()(
const ScsrMatrixRef& lhs,
const ScsrMatrixRef& rhs,
Tensor &output_values,
Tensor &output_indices) {
float alpha = 1.0;
ScsrMatrixRef empty;
return Sgemm2(lhs, rhs, empty, &alpha, nullptr, output_values, output_indices);
}
csrOutput Sgemm2(
const ScsrMatrixRef& A,
const ScsrMatrixRef& B,
const ScsrMatrixRef& C,
const float* alpha,
const float* beta,
Tensor &output_values,
Tensor &output_indices) {
void* buffer_{nullptr};
hipsparseHandle_t cusparseHandle_ = at::cuda::getCurrentCUDASparseHandle();
TORCH_CUDASPARSE_CHECK(hipsparseSetPointerMode(cusparseHandle_, HIPSPARSE_POINTER_MODE_HOST));
csrOutput out({A.size(0), B.size(1)});
int innerSize = confirm_mult_size(A.size_, B.size_);
out.csr_pointers_ = at::empty({out.size(0) + 1}, output_indices.options().dtype(kInt));
// Compute needed buffer size
size_t new_bubber_sz;
TORCH_CUDASPARSE_CHECK(hipsparseScsrgemm2_bufferSizeExt(
cusparseHandle_,
out.size(0),
out.size(1),
innerSize,
alpha,
A.description_,
A.nnz_,
A.csr_pointers_,
A.csr_indices_,
B.description_,
B.nnz_,
B.csr_pointers_,
B.csr_indices_,
beta,
C.description_,
C.nnz_,
C.csr_pointers_,
C.csr_indices_,
gemm2Info_,
&new_bubber_sz));
auto& allocator = *::c10::hip::HIPCachingAllocatorMasqueradingAsCUDA::get();
at::DataPtr data_ptr = allocator.allocate(new_bubber_sz);
buffer_ = data_ptr.get();
// Find the resulting non-zero pattern.
TORCH_CUDASPARSE_CHECK(hipsparseXcsrgemm2Nnz(
cusparseHandle_,
out.size(0),
out.size(1),
innerSize,
A.description_,
A.nnz_,
A.csr_pointers_,
A.csr_indices_,
B.description_,
B.nnz_,
B.csr_pointers_,
B.csr_indices_,
C.description_,
C.nnz_,
C.csr_pointers_,
C.csr_indices_,
out.description_,
out.csr_pointers_.data_ptr<int>(),
&out.nnz_,
gemm2Info_,
buffer_));
out.csr_indices_ = at::empty({out.nnz_}, output_indices.options().dtype(kInt));
out.csr_values_ = at::empty({out.nnz_}, output_values.options());
// Perform the gemm2 operation for floats
// out = alpha * A * B + beta * C
TORCH_CUDASPARSE_CHECK(hipsparseScsrgemm2(
cusparseHandle_,
out.size(0),
out.size(1),
innerSize,
alpha,
A.description_,
A.nnz_,
A.csr_values_,
A.csr_pointers_,
A.csr_indices_,
B.description_,
B.nnz_,
B.csr_values_,
B.csr_pointers_,
B.csr_indices_,
beta,
C.description_,
C.nnz_,
C.csr_values_,
C.csr_pointers_,
C.csr_indices_,
out.description_,
out.csr_values_.data_ptr<float>(),
out.csr_pointers_.data_ptr<int>(),
out.csr_indices_.data_ptr<int>(),
gemm2Info_,
buffer_));
return out;
}
};
#endif // IS_CUSPARSE11_AVAILABLE()
template <typename scalar_t>
void sparse_sparse_matmul_cuda_kernel(
Tensor& result,
const Tensor& mat1,
const Tensor& mat2) {
static_assert(
std::is_same<c10::Half, scalar_t>::value ||
std::is_same<c10::BFloat16, scalar_t>::value ||
std::is_same<float, scalar_t>::value ||
std::is_same<double, scalar_t>::value ||
std::is_same<c10::complex<float>, scalar_t>::value ||
std::is_same<c10::complex<double>, scalar_t>::value,
"sparse_sparse_matmul_cuda_kernel only supports data type of half, bfloat16, float, double and complex float, double.");
// older versions of cusparse on Windows segfault for complex128 dtype
#if defined(_WIN32) && defined(CUSPARSE_VERSION) && CUSPARSE_VERSION < 11400
TORCH_CHECK(
!(mat1.scalar_type() == ScalarType::ComplexDouble),
"Sparse multiplication with complex128 dtype inputs is not supported with current CUDA version. Please upgrade to CUDA Toolkit 11.2.1+");
#endif
Tensor mat1_indices_ = mat1._indices().contiguous();
Tensor mat1_values = mat1._values().contiguous();
Tensor mat1_row_indices = mat1_indices_.select(0, 0);
Tensor mat1_col_indices = mat1_indices_.select(0, 1);
Tensor mat1_indptr = _to_csr_int(mat1_row_indices, mat1.size(0), mat1._nnz());
Tensor mat1_indices = at::empty(
{mat1_col_indices.size(0)}, mat1_col_indices.options().dtype(kInt));
mat1_indices.copy_(mat1_col_indices);
Tensor mat2_indices_ = mat2._indices().contiguous();
Tensor mat2_values = mat2._values().contiguous();
Tensor mat2_row_indices = mat2_indices_.select(0, 0);
Tensor mat2_col_indices = mat2_indices_.select(0, 1);
Tensor mat2_indptr = _to_csr_int(mat2_row_indices, mat2.size(0), mat2._nnz());
Tensor mat2_indices = at::empty({mat2_col_indices.size(0)}, mat2_col_indices.options().dtype(kInt));
mat2_indices.copy_(mat2_col_indices);
auto m = mat1.size(0);
auto k1 = mat1.size(1);
auto k2 = mat2.size(0);
auto n = mat2.size(1);
TORCH_CHECK((m <= INT_MAX) && (n <= INT_MAX) && (k1 <= INT_MAX),
"At the moment, hipsparseDcsrgemm2 only supports m, n, k, nnz with the bound [val] <= ", INT_MAX, ".",
"If you need this, please file an issue on GitHub."
);
auto output_indices = result._indices();
auto output_values = result._values();
if ((k1 == 0 && k2 == 0) || (n == 0 && m == 0)) {
output_indices.zero_();
output_values.zero_();
return;
}
csrMatrixRef<scalar_t> csr_mat1(
mat1_indices.data_ptr<int>(),
mat1_indptr.data_ptr<int>(),
mat1_values.data_ptr<scalar_t>(),
(int)mat1._nnz(),
{(int)mat1.size(0), (int)mat1.size(1)});
csrMatrixRef<scalar_t> csr_mat2(
mat2_indices.data_ptr<int>(),
mat2_indptr.data_ptr<int>(),
mat2_values.data_ptr<scalar_t>(),
(int)mat2._nnz(),
{(int)mat2.size(0), (int)mat2.size(1)});
// Sparse matrix multiplication
CusparseMatrixMultiplyOp<scalar_t> op;
csrOutput csr_output = op(csr_mat1, csr_mat2, output_values, output_indices);
auto nnz = csr_output.nnz_;
output_values.set_(csr_output.csr_values_);
output_indices.resize_({2, nnz});
auto output_indices_accessor = output_indices.packed_accessor64<int64_t, 2>();
auto csr_output_pointers_accessor =
csr_output.csr_pointers_.packed_accessor64<int, 1>();
auto csr_output_ind_accessor =
csr_output.csr_indices_.packed_accessor64<int, 1>();
auto major_dim = result.size(0);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
at::cuda::ThrustAllocator allocator;
auto policy = thrust::hip::par(allocator).on(stream);
// Filling the COO row indices
thrust::for_each(
policy,
thrust::make_counting_iterator(int64_t(0)),
thrust::make_counting_iterator(int64_t(major_dim)),
[output_indices_accessor,
csr_output_pointers_accessor,
major_dim,
nnz] __device__(int64_t i) {
auto Ap = csr_output_pointers_accessor.data();
int64_t* indices_row = output_indices_accessor[0].data();
for (int jj = Ap[i]; jj < Ap[i + 1]; jj++) {
indices_row[jj] = i;
}
});
// Filling the COO column indices
thrust::for_each(
policy,
thrust::make_counting_iterator(int64_t(0)),
thrust::make_counting_iterator(int64_t(csr_output.nnz_)),
[output_indices_accessor,
csr_output_pointers_accessor,
csr_output_ind_accessor,
major_dim,
nnz] __device__(int64_t i) {
int64_t* indices_col = output_indices_accessor[1].data();
indices_col[i] = csr_output_ind_accessor[i];
});
}
} // end anonymous namespace
Tensor sparse_sparse_matmul_cuda(const Tensor& mat1_, const Tensor& mat2_) {
TORCH_INTERNAL_ASSERT(mat1_.is_sparse());
TORCH_INTERNAL_ASSERT(mat2_.is_sparse());
TORCH_CHECK(mat1_.dim() == 2);
TORCH_CHECK(mat2_.dim() == 2);
TORCH_CHECK(mat1_.dense_dim() == 0, "sparse_mm: scalar values expected, mat1 got ", mat1_.dense_dim(), "D values");
TORCH_CHECK(mat2_.dense_dim() == 0, "sparse_mm: scalar values expected, mat2 got ", mat2_.dense_dim(), "D values");
TORCH_CHECK(
mat1_.size(1) == mat2_.size(0), "mat1 and mat2 shapes cannot be multiplied (",
mat1_.size(0), "x", mat1_.size(1), " and ", mat2_.size(0), "x", mat2_.size(1), ")");
TORCH_CHECK(mat1_.scalar_type() == mat2_.scalar_type(),
"mat1 dtype ", mat1_.scalar_type(), " does not match mat2 dtype ", mat2_.scalar_type());
auto output = at::native::empty_like(mat1_);
output.sparse_resize_and_clear_({mat1_.size(0), mat2_.size(1)}, mat1_.sparse_dim(), 0);
#if IS_CUSPARSE11_AVAILABLE()
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(kHalf, kBFloat16, mat1_.scalar_type(), "sparse_matmul", [&] {
sparse_sparse_matmul_cuda_kernel<scalar_t>(output, mat1_.coalesce(), mat2_.coalesce());
});
#else
AT_DISPATCH_FLOATING_TYPES(mat1_.scalar_type(), "sparse_matmul", [&] {
sparse_sparse_matmul_cuda_kernel<scalar_t>(output, mat1_.coalesce(), mat2_.coalesce());
});
#endif
return output;
}
} // namespace native
} // namespace at
| 5332bc074fbdc327fac3985a56188b45e9f4ead9.cu | #define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/Config.h>
#include <ATen/Dispatch.h>
#include <ATen/NamedTensorUtils.h>
#include <ATen/Parallel.h>
#include <ATen/SparseTensorImpl.h>
#include <ATen/SparseTensorUtils.h>
#include <ATen/native/Resize.h>
#include <cuda_runtime.h>
#include <type_traits>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/_sparse_sparse_matmul_native.h>
#include <ATen/ops/empty.h>
#include <ATen/ops/empty_like_native.h>
#endif
#include <thrust/device_ptr.h>
#include <thrust/for_each.h>
#include <thrust/sequence.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/CUDADataType.h>
#include <ATen/cuda/CUDAUtils.h>
#include <ATen/cuda/ThrustAllocator.h>
#include <cusparse.h>
#include <ATen/native/sparse/cuda/SparseCUDABlas.h>
#include <c10/cuda/CUDACachingAllocator.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/functional.h>
#include <thrust/binary_search.h>
#include <thrust/execution_policy.h>
#include <thrust/iterator/discard_iterator.h>
#if defined(__CUDACC__) && (CUSPARSE_VERSION >= 11000)
#define IS_CUSPARSE11_AVAILABLE() 1
#else
#define IS_CUSPARSE11_AVAILABLE() 0
#endif
#if IS_CUSPARSE11_AVAILABLE()
#include <library_types.h>
#endif
namespace at {
namespace native {
namespace {
using namespace at::sparse;
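// Converts COO row indices into a CSR row-pointer array of length `dim + 1`
// (as 32-bit ints) using cusparse's Xcoo2csr routine.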
Tensor _to_csr_int(const Tensor& rowIndices, int64_t dim, int64_t nnz) {
Tensor csr = at::empty({dim + 1}, CUDA(kInt));
Tensor rowIndicesInt = at::empty({rowIndices.size(0)}, CUDA(kInt));
rowIndicesInt.copy_(rowIndices);
sparse::cuda::Xcoo2csr(
rowIndicesInt.data_ptr<int32_t>(), nnz, dim, csr.data_ptr<int32_t>());
return csr;
}
#pragma push
// NVCC complains that confirm_mult_size is not used,
// but it is used in specializations of CusparseMatrixMultiplyOp below
#pragma diag_suppress 177 // Function was declared but never referenced
int confirm_mult_size(const std::vector<int>& mat1_size, const std::vector<int>& mat2_size) {
TORCH_CHECK(
mat1_size[1] == mat2_size[0],
"mat1 and mat2 shapes cannot be multiplied (",
mat1_size[0],
"x",
mat1_size[1],
" and ",
mat2_size[0],
"x",
mat2_size[1],
")");
return mat1_size[1];
}
#pragma pop
void create_general_description_(cusparseMatDescr_t& description_) {
TORCH_CUDASPARSE_CHECK(cusparseCreateMatDescr(&description_));
TORCH_CUDASPARSE_CHECK(cusparseSetMatType(description_, CUSPARSE_MATRIX_TYPE_GENERAL));
TORCH_CUDASPARSE_CHECK(cusparseSetMatIndexBase(description_, CUSPARSE_INDEX_BASE_ZERO));
}
// csrMatrixRef is used to hold a raw CSR matrix representation
// coming from the `sparse_sparse_matmul_cuda_kernel` function.
// Moreover this implements a RAII guard for a cusparse descriptor
template<class scalar_t>
struct csrMatrixRef {
int* csr_indices_{nullptr};
int* csr_pointers_{nullptr};
scalar_t* csr_values_{nullptr};
int nnz_{0};
std::vector<int> size_{};
#if IS_CUSPARSE11_AVAILABLE()
cusparseSpMatDescr_t description_{0};
#else
cusparseMatDescr_t description_{0};
#endif
csrMatrixRef() {
#if !IS_CUSPARSE11_AVAILABLE()
create_general_description_(description_);
#endif
}
csrMatrixRef(
int* csr_indices,
int* csr_pointers,
scalar_t* csr_values,
int nnz,
const std::vector<int>& size)
: csr_indices_{csr_indices},
csr_pointers_{csr_pointers},
csr_values_{csr_values},
nnz_{nnz},
size_{size} {
#if IS_CUSPARSE11_AVAILABLE()
cudaDataType cuda_data_type = at::cuda::getCudaDataType<scalar_t>();
TORCH_CUDASPARSE_CHECK(cusparseCreateCsr(
&description_,
this->size(0),
this->size(1),
this->nnz_,
this->csr_pointers_,
this->csr_indices_,
this->csr_values_,
CUSPARSE_INDEX_32I,
CUSPARSE_INDEX_32I,
CUSPARSE_INDEX_BASE_ZERO,
cuda_data_type));
#else
create_general_description_(description_);
#endif
}
~csrMatrixRef() {
#if IS_CUSPARSE11_AVAILABLE()
cusparseDestroySpMat(description_);
#else
cusparseDestroyMatDescr(description_);
#endif
}
int size(int index) const {
return size_.at(index);
}
};
// csrOutput is used to represent the output for `CusparseMatrixMultiplyOp`
// Note that `csrOutput` is different from `csrMatrixRef` and the purpose
// of this was to have a materialized version of a CSR matrix.
// Moreover this implements a RAII guard for a cusparse descriptor
struct csrOutput {
Tensor csr_indices_{};
Tensor csr_pointers_{};
at::Tensor csr_values_{};
int nnz_{0};
std::vector<int> size_;
cusparseMatDescr_t description_{0};
csrOutput(const std::vector<int> &size) : size_{size} {
create_general_description_(description_);
}
~csrOutput() {
cusparseDestroyMatDescr(description_);
}
int size(int index) const {
return size_.at(index);
}
};
#if IS_CUSPARSE11_AVAILABLE()
// RAII guard helps to support cuSparse 11 API for `A @ B` operation
// This generic template exists because with cuSparse the `scalar_t` type could be a double or float
template <class scalar_t>
struct CusparseMatrixMultiplyOp {
cusparseSpGEMMDescr_t spgemmDesc;
CusparseMatrixMultiplyOp() {
static_assert(
std::is_same<c10::Half, scalar_t>::value ||
std::is_same<c10::BFloat16, scalar_t>::value ||
std::is_same<float, scalar_t>::value ||
std::is_same<double, scalar_t>::value ||
std::is_same<c10::complex<float>, scalar_t>::value ||
std::is_same<c10::complex<double>, scalar_t>::value,
"cusparseSpGEMM only supports data type of half, bfloat16, float, double and complex float, double.");
// SpGEMM Computation
TORCH_CUDASPARSE_CHECK(cusparseSpGEMM_createDescr(&spgemmDesc));
}
~CusparseMatrixMultiplyOp() {
// destroy matrix/vector descriptors
cusparseSpGEMM_destroyDescr(spgemmDesc);
}
csrOutput operator ()(
const csrMatrixRef<scalar_t>& A,
const csrMatrixRef<scalar_t>& B,
Tensor& output_values,
Tensor& output_indices) {
const int A_num_rows = A.size(0);
const int B_num_cols = B.size(1);
csrOutput out({A.size(0), B.size(1)});
out.csr_pointers_ = at::empty({out.size(0) + 1}, output_indices.options().dtype(kInt));
int* dC_csrOffsets = out.csr_pointers_.data_ptr<int>();
int* dC_columns = nullptr;
scalar_t* dC_values = nullptr;
scalar_t alpha = 1.0f;
scalar_t beta = 0.0f;
cusparseOperation_t opA = CUSPARSE_OPERATION_NON_TRANSPOSE;
cusparseOperation_t opB = CUSPARSE_OPERATION_NON_TRANSPOSE;
csrMatrixRef<scalar_t> C(
nullptr,
nullptr,
nullptr,
/*nnz*/0,
{A_num_rows, B_num_cols}
);
//--------------------------------------------------------------------------
// CUSPARSE APIs
cusparseHandle_t handle = at::cuda::getCurrentCUDASparseHandle();
void *dBuffer1 = NULL, *dBuffer2 = NULL;
size_t bufferSize1 = 0, bufferSize2 = 0;
cusparseSpMatDescr_t matA = A.description_;
cusparseSpMatDescr_t matB = B.description_;
cusparseSpMatDescr_t matC = C.description_;
//--------------------------------------------------------------------------
cudaDataType computeType = at::cuda::getCudaDataType<scalar_t>();
// If a specific GPU model does not provide native support for a given data type,
// the routine returns CUSPARSE_STATUS_ARCH_MISMATCH error
cudaDeviceProp* prop = at::cuda::getCurrentDeviceProperties();
TORCH_CHECK(prop->major >= 5 && !((10*prop->major + prop->minor) < 53 && computeType == CUDA_R_16F),
"sparse_mm: CUDA Float16 requires compute capability >= 53 (current: ", prop->major, prop->minor, ")");
TORCH_CHECK(!(prop->major < 8 && computeType == CUDA_R_16BF),
"sparse_mm: CUDA BFloat16 requires compute capability >= 80 (current: ", prop->major, prop->minor, ")");
// ask bufferSize1 bytes for external memory
TORCH_CUDASPARSE_CHECK(cusparseSpGEMM_workEstimation(
handle,
opA,
opB,
&alpha,
matA,
matB,
&beta,
matC,
computeType,
CUSPARSE_SPGEMM_DEFAULT,
spgemmDesc,
&bufferSize1,
NULL));
auto& allocator = *::c10::cuda::CUDACachingAllocator::get();
at::DataPtr dataPtr1 = allocator.allocate(bufferSize1);
dBuffer1 = dataPtr1.get();
// inspect the matrices A and B to understand the memory requirement for
// the next step
TORCH_CUDASPARSE_CHECK(cusparseSpGEMM_workEstimation(
handle,
opA,
opB,
&alpha,
matA,
matB,
&beta,
matC,
computeType,
CUSPARSE_SPGEMM_DEFAULT,
spgemmDesc,
&bufferSize1,
dBuffer1));
// ask bufferSize2 bytes for external memory
TORCH_CUDASPARSE_CHECK(cusparseSpGEMM_compute(
handle,
opA,
opB,
&alpha,
matA,
matB,
&beta,
matC,
computeType,
CUSPARSE_SPGEMM_DEFAULT,
spgemmDesc,
&bufferSize2,
NULL));
at::DataPtr dataPtr2 = allocator.allocate(bufferSize2);
dBuffer2 = dataPtr2.get();
// compute the intermediate product of A * B
TORCH_CUDASPARSE_CHECK(cusparseSpGEMM_compute(
handle,
opA,
opB,
&alpha,
matA,
matB,
&beta,
matC,
computeType,
CUSPARSE_SPGEMM_DEFAULT,
spgemmDesc,
&bufferSize2,
dBuffer2));
// get matrix C non-zero entries C_num_nnz1
int64_t C_num_rows1, C_num_cols1, C_num_nnz1;
TORCH_CUDASPARSE_CHECK(
cusparseSpMatGetSize(matC, &C_num_rows1, &C_num_cols1, &C_num_nnz1));
// allocate matrix C
// allocate C offsets
out.nnz_ = C_num_nnz1;
out.csr_indices_ = at::empty({out.nnz_}, output_indices.options().dtype(kInt));
out.csr_values_ = at::empty({out.nnz_}, output_values.options());
dC_columns = out.csr_indices_.data_ptr<int>();
dC_values = out.csr_values_.data_ptr<scalar_t>();
// update matC with the new pointers
TORCH_CUDASPARSE_CHECK(
cusparseCsrSetPointers(matC, dC_csrOffsets, dC_columns, dC_values));
// copy the final products to the matrix C
TORCH_CUDASPARSE_CHECK(cusparseSpGEMM_copy(
handle,
opA,
opB,
&alpha,
matA,
matB,
&beta,
matC,
computeType,
CUSPARSE_SPGEMM_DEFAULT,
spgemmDesc));
return out;
}
};
template struct CusparseMatrixMultiplyOp<float>;
template struct CusparseMatrixMultiplyOp<double>;
#else // if not IS_CUSPARSE11_AVAILABLE()
using DcsrMatrixRef = csrMatrixRef<double>;
using ScsrMatrixRef = csrMatrixRef<float>;
// RAII guard helps to support cuSparse 10 API for `A @ B` operation
// This generic template exists because with cuSparse the `scalar_t` type could be a double or float
template <class scalar_t>
struct CusparseMatrixMultiplyOp {
csrOutput operator()(
const csrMatrixRef<scalar_t>& lhs,
const csrMatrixRef<scalar_t>& rhs,
Tensor &output_values,
Tensor &output_indices)
{
TORCH_INTERNAL_ASSERT(false, "cusparse csr sparse-sparse MM only supports data type of float and double.");
}
};
// Specialization for `A @ B` operation for double values with cuSparse
template<> struct CusparseMatrixMultiplyOp<double> {
csrgemm2Info_t gemm2Info_;
CusparseMatrixMultiplyOp() {
TORCH_CUDASPARSE_CHECK(cusparseCreateCsrgemm2Info(&gemm2Info_));
}
~CusparseMatrixMultiplyOp() {
cusparseDestroyCsrgemm2Info(gemm2Info_);
}
csrOutput operator ()(
const DcsrMatrixRef& lhs,
const DcsrMatrixRef& rhs,
Tensor &output_values,
Tensor &output_indices) {
double alpha = 1.0;
DcsrMatrixRef empty;
return Dgemm2(lhs, rhs, empty, &alpha, nullptr, output_values, output_indices);
}
csrOutput Dgemm2(
const DcsrMatrixRef& A,
const DcsrMatrixRef& B,
const DcsrMatrixRef& C,
const double* alpha,
const double* beta,
Tensor &output_values,
Tensor &output_indices) {
void* buffer_{nullptr};
cusparseHandle_t cusparseHandle_ = at::cuda::getCurrentCUDASparseHandle();
TORCH_CUDASPARSE_CHECK(cusparseSetPointerMode(cusparseHandle_, CUSPARSE_POINTER_MODE_HOST));
csrOutput out({A.size(0), B.size(1)});
int innerSize = confirm_mult_size(A.size_, B.size_);
out.csr_pointers_ = at::empty({out.size(0) + 1}, output_indices.options().dtype(kInt));
// Compute needed buffer size
size_t new_bubber_sz;
TORCH_CUDASPARSE_CHECK(cusparseDcsrgemm2_bufferSizeExt(
cusparseHandle_,
out.size(0),
out.size(1),
innerSize,
alpha,
A.description_,
A.nnz_,
A.csr_pointers_,
A.csr_indices_,
B.description_,
B.nnz_,
B.csr_pointers_,
B.csr_indices_,
beta,
C.description_,
C.nnz_,
C.csr_pointers_,
C.csr_indices_,
gemm2Info_,
&new_bubber_sz));
// (Re)allocate buffer if needed
auto& allocator = *::c10::cuda::CUDACachingAllocator::get();
at::DataPtr data_ptr = allocator.allocate(new_bubber_sz);
buffer_ = data_ptr.get();
// Find the resulting non-zero pattern.
TORCH_CUDASPARSE_CHECK(cusparseXcsrgemm2Nnz(
cusparseHandle_,
out.size(0),
out.size(1),
innerSize,
A.description_,
A.nnz_,
A.csr_pointers_,
A.csr_indices_,
B.description_,
B.nnz_,
B.csr_pointers_,
B.csr_indices_,
C.description_,
C.nnz_,
C.csr_pointers_,
C.csr_indices_,
out.description_,
out.csr_pointers_.data_ptr<int>(),
&out.nnz_,
gemm2Info_,
buffer_));
out.csr_indices_ = at::empty({out.nnz_}, output_indices.options().dtype(kInt));
out.csr_values_ = at::empty({out.nnz_}, output_values.options());
// Perform the gemm2 operation for doubles
// out = alpha ∗ A ∗ B + beta ∗ C
TORCH_CUDASPARSE_CHECK(cusparseDcsrgemm2(
cusparseHandle_,
out.size(0),
out.size(1),
innerSize,
alpha,
A.description_,
A.nnz_,
A.csr_values_,
A.csr_pointers_,
A.csr_indices_,
B.description_,
B.nnz_,
B.csr_values_,
B.csr_pointers_,
B.csr_indices_,
beta,
C.description_,
C.nnz_,
C.csr_values_,
C.csr_pointers_,
C.csr_indices_,
out.description_,
out.csr_values_.data_ptr<double>(),
out.csr_pointers_.data_ptr<int>(),
out.csr_indices_.data_ptr<int>(),
gemm2Info_,
buffer_));
return out;
}
};
// Specialization for `A @ B` operation for float values with cuSparse
template<> struct CusparseMatrixMultiplyOp<float> {
csrgemm2Info_t gemm2Info_;
CusparseMatrixMultiplyOp() {
TORCH_CUDASPARSE_CHECK(cusparseCreateCsrgemm2Info(&gemm2Info_));
}
~CusparseMatrixMultiplyOp() {
cusparseDestroyCsrgemm2Info(gemm2Info_);
}
csrOutput operator()(
const ScsrMatrixRef& lhs,
const ScsrMatrixRef& rhs,
Tensor &output_values,
Tensor &output_indices) {
float alpha = 1.0;
ScsrMatrixRef empty;
return Sgemm2(lhs, rhs, empty, &alpha, nullptr, output_values, output_indices);
}
csrOutput Sgemm2(
const ScsrMatrixRef& A,
const ScsrMatrixRef& B,
const ScsrMatrixRef& C,
const float* alpha,
const float* beta,
Tensor &output_values,
Tensor &output_indices) {
void* buffer_{nullptr};
cusparseHandle_t cusparseHandle_ = at::cuda::getCurrentCUDASparseHandle();
TORCH_CUDASPARSE_CHECK(cusparseSetPointerMode(cusparseHandle_, CUSPARSE_POINTER_MODE_HOST));
csrOutput out({A.size(0), B.size(1)});
int innerSize = confirm_mult_size(A.size_, B.size_);
out.csr_pointers_ = at::empty({out.size(0) + 1}, output_indices.options().dtype(kInt));
// Compute needed buffer size
size_t new_bubber_sz;
TORCH_CUDASPARSE_CHECK(cusparseScsrgemm2_bufferSizeExt(
cusparseHandle_,
out.size(0),
out.size(1),
innerSize,
alpha,
A.description_,
A.nnz_,
A.csr_pointers_,
A.csr_indices_,
B.description_,
B.nnz_,
B.csr_pointers_,
B.csr_indices_,
beta,
C.description_,
C.nnz_,
C.csr_pointers_,
C.csr_indices_,
gemm2Info_,
&new_bubber_sz));
auto& allocator = *::c10::cuda::CUDACachingAllocator::get();
at::DataPtr data_ptr = allocator.allocate(new_bubber_sz);
buffer_ = data_ptr.get();
// Find the resulting non-zero pattern.
TORCH_CUDASPARSE_CHECK(cusparseXcsrgemm2Nnz(
cusparseHandle_,
out.size(0),
out.size(1),
innerSize,
A.description_,
A.nnz_,
A.csr_pointers_,
A.csr_indices_,
B.description_,
B.nnz_,
B.csr_pointers_,
B.csr_indices_,
C.description_,
C.nnz_,
C.csr_pointers_,
C.csr_indices_,
out.description_,
out.csr_pointers_.data_ptr<int>(),
&out.nnz_,
gemm2Info_,
buffer_));
out.csr_indices_ = at::empty({out.nnz_}, output_indices.options().dtype(kInt));
out.csr_values_ = at::empty({out.nnz_}, output_values.options());
// Perform the gemm2 operation for floats
// out = alpha ∗ A ∗ B + beta ∗ C
TORCH_CUDASPARSE_CHECK(cusparseScsrgemm2(
cusparseHandle_,
out.size(0),
out.size(1),
innerSize,
alpha,
A.description_,
A.nnz_,
A.csr_values_,
A.csr_pointers_,
A.csr_indices_,
B.description_,
B.nnz_,
B.csr_values_,
B.csr_pointers_,
B.csr_indices_,
beta,
C.description_,
C.nnz_,
C.csr_values_,
C.csr_pointers_,
C.csr_indices_,
out.description_,
out.csr_values_.data_ptr<float>(),
out.csr_pointers_.data_ptr<int>(),
out.csr_indices_.data_ptr<int>(),
gemm2Info_,
buffer_));
return out;
}
};
#endif // IS_CUSPARSE11_AVAILABLE()
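// Multiplies two coalesced sparse COO matrices: both operands are converted to
// 32-bit CSR form, the product is computed with cuSPARSE (SpGEMM on cuSPARSE 11+,
// csrgemm2 otherwise), and the resulting CSR output is expanded back into the
// COO indices and values of `result`.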
template <typename scalar_t>
void sparse_sparse_matmul_cuda_kernel(
Tensor& result,
const Tensor& mat1,
const Tensor& mat2) {
static_assert(
std::is_same<c10::Half, scalar_t>::value ||
std::is_same<c10::BFloat16, scalar_t>::value ||
std::is_same<float, scalar_t>::value ||
std::is_same<double, scalar_t>::value ||
std::is_same<c10::complex<float>, scalar_t>::value ||
std::is_same<c10::complex<double>, scalar_t>::value,
"sparse_sparse_matmul_cuda_kernel only supports data type of half, bfloat16, float, double and complex float, double.");
// older versions of cusparse on Windows segfault for complex128 dtype
#if defined(_WIN32) && defined(CUSPARSE_VERSION) && CUSPARSE_VERSION < 11400
TORCH_CHECK(
!(mat1.scalar_type() == ScalarType::ComplexDouble),
"Sparse multiplication with complex128 dtype inputs is not supported with current CUDA version. Please upgrade to CUDA Toolkit 11.2.1+");
#endif
Tensor mat1_indices_ = mat1._indices().contiguous();
Tensor mat1_values = mat1._values().contiguous();
Tensor mat1_row_indices = mat1_indices_.select(0, 0);
Tensor mat1_col_indices = mat1_indices_.select(0, 1);
Tensor mat1_indptr = _to_csr_int(mat1_row_indices, mat1.size(0), mat1._nnz());
Tensor mat1_indices = at::empty(
{mat1_col_indices.size(0)}, mat1_col_indices.options().dtype(kInt));
mat1_indices.copy_(mat1_col_indices);
Tensor mat2_indices_ = mat2._indices().contiguous();
Tensor mat2_values = mat2._values().contiguous();
Tensor mat2_row_indices = mat2_indices_.select(0, 0);
Tensor mat2_col_indices = mat2_indices_.select(0, 1);
Tensor mat2_indptr = _to_csr_int(mat2_row_indices, mat2.size(0), mat2._nnz());
Tensor mat2_indices = at::empty({mat2_col_indices.size(0)}, mat2_col_indices.options().dtype(kInt));
mat2_indices.copy_(mat2_col_indices);
auto m = mat1.size(0);
auto k1 = mat1.size(1);
auto k2 = mat2.size(0);
auto n = mat2.size(1);
TORCH_CHECK((m <= INT_MAX) && (n <= INT_MAX) && (k1 <= INT_MAX),
"At the moment, cusparseDcsrgemm2 only supports m, n, k, nnz with the bound [val] <= ", INT_MAX, ".",
"If you need this, please file an issue on GitHub."
);
auto output_indices = result._indices();
auto output_values = result._values();
if ((k1 == 0 && k2 == 0) || (n == 0 && m == 0)) {
output_indices.zero_();
output_values.zero_();
return;
}
csrMatrixRef<scalar_t> csr_mat1(
mat1_indices.data_ptr<int>(),
mat1_indptr.data_ptr<int>(),
mat1_values.data_ptr<scalar_t>(),
(int)mat1._nnz(),
{(int)mat1.size(0), (int)mat1.size(1)});
csrMatrixRef<scalar_t> csr_mat2(
mat2_indices.data_ptr<int>(),
mat2_indptr.data_ptr<int>(),
mat2_values.data_ptr<scalar_t>(),
(int)mat2._nnz(),
{(int)mat2.size(0), (int)mat2.size(1)});
// Sparse matrix multiplication
CusparseMatrixMultiplyOp<scalar_t> op;
csrOutput csr_output = op(csr_mat1, csr_mat2, output_values, output_indices);
auto nnz = csr_output.nnz_;
output_values.set_(csr_output.csr_values_);
output_indices.resize_({2, nnz});
auto output_indices_accessor = output_indices.packed_accessor64<int64_t, 2>();
auto csr_output_pointers_accessor =
csr_output.csr_pointers_.packed_accessor64<int, 1>();
auto csr_output_ind_accessor =
csr_output.csr_indices_.packed_accessor64<int, 1>();
auto major_dim = result.size(0);
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
at::cuda::ThrustAllocator allocator;
auto policy = thrust::cuda::par(allocator).on(stream);
// Filling the COO row indices
thrust::for_each(
policy,
thrust::make_counting_iterator(int64_t(0)),
thrust::make_counting_iterator(int64_t(major_dim)),
[output_indices_accessor,
csr_output_pointers_accessor,
major_dim,
nnz] __device__(int64_t i) {
auto Ap = csr_output_pointers_accessor.data();
int64_t* indices_row = output_indices_accessor[0].data();
for (int jj = Ap[i]; jj < Ap[i + 1]; jj++) {
indices_row[jj] = i;
}
});
// Filling the COO column indices
thrust::for_each(
policy,
thrust::make_counting_iterator(int64_t(0)),
thrust::make_counting_iterator(int64_t(csr_output.nnz_)),
[output_indices_accessor,
csr_output_pointers_accessor,
csr_output_ind_accessor,
major_dim,
nnz] __device__(int64_t i) {
int64_t* indices_col = output_indices_accessor[1].data();
indices_col[i] = csr_output_ind_accessor[i];
});
}
} // end anonymous namespace
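// CUDA implementation of 2-D sparse @ sparse matrix multiplication: validates the
// inputs (2-D, scalar values, matching dtypes, compatible shapes) and dispatches
// to sparse_sparse_matmul_cuda_kernel on the coalesced operands.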
Tensor sparse_sparse_matmul_cuda(const Tensor& mat1_, const Tensor& mat2_) {
TORCH_INTERNAL_ASSERT(mat1_.is_sparse());
TORCH_INTERNAL_ASSERT(mat2_.is_sparse());
TORCH_CHECK(mat1_.dim() == 2);
TORCH_CHECK(mat2_.dim() == 2);
TORCH_CHECK(mat1_.dense_dim() == 0, "sparse_mm: scalar values expected, mat1 got ", mat1_.dense_dim(), "D values");
TORCH_CHECK(mat2_.dense_dim() == 0, "sparse_mm: scalar values expected, mat2 got ", mat2_.dense_dim(), "D values");
TORCH_CHECK(
mat1_.size(1) == mat2_.size(0), "mat1 and mat2 shapes cannot be multiplied (",
mat1_.size(0), "x", mat1_.size(1), " and ", mat2_.size(0), "x", mat2_.size(1), ")");
TORCH_CHECK(mat1_.scalar_type() == mat2_.scalar_type(),
"mat1 dtype ", mat1_.scalar_type(), " does not match mat2 dtype ", mat2_.scalar_type());
auto output = at::native::empty_like(mat1_);
output.sparse_resize_and_clear_({mat1_.size(0), mat2_.size(1)}, mat1_.sparse_dim(), 0);
#if IS_CUSPARSE11_AVAILABLE()
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(kHalf, kBFloat16, mat1_.scalar_type(), "sparse_matmul", [&] {
sparse_sparse_matmul_cuda_kernel<scalar_t>(output, mat1_.coalesce(), mat2_.coalesce());
});
#else
AT_DISPATCH_FLOATING_TYPES(mat1_.scalar_type(), "sparse_matmul", [&] {
sparse_sparse_matmul_cuda_kernel<scalar_t>(output, mat1_.coalesce(), mat2_.coalesce());
});
#endif
return output;
}
} // namespace native
} // namespace at
|
6f68f806155aeaa258c131d7cbfd987457232dc5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<mat_opt.h>
#include<stdio.h>
#include<fstream>
#include<stdint.h>
#include<assert.h>
#include<time.h>
#include<unistd.h>
#include<cuda_runtime.h>
#include<rocblas.h>
#include<thrust/sort.h>
#include"device_launch_parameters.h"
#include<helper_cuda.h>
#include<helper_functions.h>
#define MAX_DEPTH 16
#define INSERTION_SORT 32
using namespace cv;
__device__ void Memcpy(double *im, double *data, int row, int col, int r_c, int r_g, int n);
__device__ void selection_sort(double *data, int left, int right);
__device__ void simple_quicksort(double *data, int left, int right, int depth);
__global__ void lognormal_mixture(double *im, int r_c, int r_g, int k, double Pf, int m, int n)
{
/***********************************************************************
Ship detection based on lognormal mixture models
INPUT
im: padding SAR density image
r_c: radius of the reference window
r_g: radius of the guard area
K :number of components
Pf: false alarm rate
m : number of rows of input image
n : number of columns of input image
OUTPUT
im_prob: the cdf of simulate distribution with the im value
*************************************************************************/
int i = threadIdx.x + blockDim.x * blockIdx.x;
int j = threadIdx.y + blockDim.y * blockIdx.y;
int size = r_c*r_c - r_g*r_g;
for(int i = 0;i<size;i++)
{
// data[i] = 1.0;
// printf("%.1f\n", data[i]);
}
}
__global__ void malloc_global(double **a)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
int j = threadIdx.y + blockDim.y * blockIdx.y;
if(i==10&&j==10)
{
int N = 10000;
printf("ok");
hipMalloc((void**)a, sizeof(double)*N);
for(int i=0;i<N;i++)
{
(*a)[i] = i;
}
}
__syncthreads();
if(i==11&&j==11)
{
printf("%f\n",(*a)[500]);
}
}
__global__ void CFAR_Gamma(double *im, double *T, int r_c, int r_g, int m, int n) {
// n_pad is the number of columns of the padded image, n is the number of columns of the original image
int row = threadIdx.x + blockDim.x * blockIdx.x;
int col = threadIdx.y + blockDim.y * blockIdx.y;
int size = (r_c*r_c-r_g*r_g)*4;int n_pad = n + 2*r_c;
double clutter_sum = 0, I_C = 0, I = 0, *clutter;
__shared__ double data[4600];
if(row < m && col < n)
{
int index = threadIdx.x + threadIdx.y*blockDim.x;
row = row + r_c; col = col + r_c; // index positions shift because of the padding border
clutter = &data[index*size];
Memcpy(im, clutter, row, col, r_c, r_g, n_pad);
simple_quicksort(clutter, 0, size-1, 0);
int number = size * 0.65;
for(int i = 0; i< number; i++)
{
clutter_sum += clutter[i];
}
I_C = clutter_sum/number;
for(int i = row-1; i <= row+1;i++)
{
for(int j = col-1;j <= col+1;j++)
{
I += im[i*n_pad+j]; // average over the 3x3 target window
}
}
I = I/9;
T[(row-r_c)*n+(col-r_c)] = I/I_C;
// double *a;
// hipMalloc((void **)&a,sizeof(double)*1000);
// for(int i=0;i<1000;i++)
// {
// a[i] = i;
// if(i==50)
// {printf("ok");printf("%f ", a[i]);}
// }
if(row==30&&col==30)
{
// for(int i=0;i<size;i++)
// {
// printf("%f ", clutter[i]);
// }
// printf("ok");
}
// hipFree(a);
}
}
__device__ void Memcpy(double *im, double *data, int row, int col, int r_c, int r_g, int n)
{
// upper clutter block, 5x30
int index = 0;
for(int i = row-r_c;i<row-r_g;i++)
{
for(int j=col-r_c;j<=col+r_c;j++)
{
data[index] = im[i*n+j];
index += 1;
}
}
// lower clutter block, 5x30
for(int i = row+r_g+1;i<=row+r_c;i++)
{
for(int j=col-r_c;j<=col+r_c;j++)
{
data[index] = im[i*n+j];
index += 1;
}
}
// left clutter block, 20x5
for(int i = row-r_g;i<=row+r_g;i++)
{
for(int j = col-r_c;j<col-r_g;j++)
{
data[index] = im[i*n+j];
index += 1;
}
}
// right clutter block, 20x5
for(int i = row-r_g;i<=row+r_g;i++)
{
for(int j = col+r_g+1;j<=col+r_c;j++)
{
data[index] = im[i*n+j];
index += 1;
}
}
}
__device__ void selection_sort(double *data, int left, int right)
{
for(int i = left; i <= right; i++)
{
double min_val = data[i];
int min_idx = i;
for(int j = i+1; j <= right; j++)
{
double val_j = data[j];
if(val_j < min_val)
{
min_idx = j;
min_val = val_j;
}
}
if(i != min_idx)
{
data[min_idx] = data[i];
data[i] = min_val;
}
}
}
__device__ void simple_quicksort(double *data, int left, int right, int depth)
{
if (depth >= MAX_DEPTH || right-left <= INSERTION_SORT)
{
selection_sort(data, left, right);
return;
}
double *lptr = &data[left];
double *rptr = &data[right];
double pivot = data[(left+right)/2];
while(lptr <= rptr)
{
double lval = *lptr;
double rval = *rptr;
while(lval < pivot)
{
lptr++;
lval = *lptr;
}
while(rval > pivot)
{
rptr--;
rval = *rptr;
}
if(lptr <= rptr)
{
*lptr++ = rval;
*rptr-- = lval;
}
}
int nright = rptr - data;
int nleft = lptr - data;
if (left < (rptr-data))
{
simple_quicksort(data, left, nright, depth+1);
}
// Launch a new block to sort the right part.
if ((lptr-data) < right)
{
simple_quicksort(data, nleft, right, depth+1);
}
}
int main(int argc, char *argv[])
{
double **im, *im_pad, *im_dev, *T, *result, threshold;
int ch, opt_index, channels,m,n; // opt_index is the index of the matched option in long_options
const char *optstring = "d:c:g:";
int r_c = 15, r_g = 10;threshold = 4.7;
dim3D arraydim;
const char *filename = "../data/data.bin";
clock_t start,end;
start = clock();
static struct option long_options[] = {
{"rc", required_argument, NULL,'c'},
{"rg", required_argument, NULL,'g'}
};
while((ch = getopt_long(argc, argv, optstring, long_options, &opt_index)) != -1)
{
switch(ch)
{
case 'd':
filename = optarg; break;
case 'c':
r_c = atoi(optarg); break;
case 'g':
r_g = atoi(optarg); break;
case '?':
cout<<"Unknown option: "<<(char)optopt<<endl;
break;
}
}
int deviceCount;
hipGetDeviceCount(&deviceCount);
for(int i=0;i<deviceCount;i++)
{
hipDeviceProp_t devProp;
hipGetDeviceProperties(&devProp,i);
cout << "GPU device:" << i << ": " << devProp.name <<endl;
cout << "global memory: " << devProp.totalGlobalMem / 1024 / 1024 << "MB" <<endl;
cout << "SM number:" << devProp.multiProcessorCount <<endl;
cout << "shared memory:" << (devProp.sharedMemPerBlock / 1024.0) <<"KB"<<endl;
cout << "block max_thread:" << devProp.maxThreadsPerBlock <<endl;
cout << "registers per Block:" << devProp.regsPerBlock <<endl;
cout << "SM max theads:" << devProp.maxThreadsPerMultiProcessor <<endl;
printf("GPU device has compute capabilities (SM %d.%d)\n", devProp.major, devProp.minor);
cout << "======================================================" <<endl;
}
ifstream infile(filename, ios::in | ios::binary);
infile.read((char *)&channels, sizeof(int));
infile.read((char *)&arraydim.m,sizeof(size_t));
infile.read((char *)&arraydim.n,sizeof(size_t));
m = (int)arraydim.m; n = (int)arraydim.n;
im = new double *[m];
for(int i=0;i<m;i++)
{
im[i] = new double[n];
for(int j=0;j<n;j++)
{
infile.read((char *)&im[i][j], sizeof(double));
}
}
Mat image = ArrayToImage(im, arraydim);
Mat origin_image = ArrayToMat(im, arraydim);
// double data[3][3] = { {1,2,3},{4,5,6},{7,8,9} };
Mat pad_image = PadArray(origin_image,r_c,r_c);
im_pad = pad_image.ptr<double>(0);
int row_pad = pad_image.rows;int col_pad = pad_image.cols;
dim3 blockdim(3,3);
dim3 griddim((m+blockdim.x-1)/blockdim.x , (n+blockdim.y-1)/blockdim.y);
checkCudaErrors(hipMalloc((void**)&im_dev, sizeof(double)*row_pad*col_pad));
checkCudaErrors(hipMalloc((void**)&T, sizeof(double)*m*n));
checkCudaErrors(hipMemcpy(im_dev, im_pad, sizeof(double)*row_pad*col_pad, hipMemcpyHostToDevice));
result = new double[m*n];
hipStream_t detect;
hipStreamCreate(&detect);
hipLaunchKernelGGL(( CFAR_Gamma), dim3(griddim), dim3(blockdim), 0, detect, im_dev, T, r_c, r_g, m, n); // should pass the unpadded image dimensions
// double **a;
// checkCudaErrors(hipMalloc((void**)&a, sizeof(double *)));
// griddim.x = 6;griddim.y = 6;
// malloc_global<<<griddim,blockdim,0>>>(a);
hipStreamSynchronize(detect);
checkCudaErrors(hipMemcpy(result, T, sizeof(double)*m*n, hipMemcpyDeviceToHost));
Mat detect_result = Mat::zeros(m, n, CV_8UC1);
for(int i = 0;i<m;i++)
{
for(int j = 0;j<n;j++)
{
if(result[i*n+j]>threshold)
detect_result.at<uchar>(i,j) = (unsigned char)255;
else
detect_result.at<uchar>(i,j) = (unsigned char)0;
}
}
hipStreamDestroy(detect);
end = clock();
imshow("origin" , image);
imshow("detected" , detect_result);
while(char(waitKey())!='q')
{
}
// FreeDoubleArray(im,arraydim);
image.release();
cout<<"GPU time: "<<(float)(end-start)/CLOCKS_PER_SEC<<" s"<<endl;
return 0 ;
}
| 6f68f806155aeaa258c131d7cbfd987457232dc5.cu | #include<mat_opt.h>
#include<stdio.h>
#include<fstream>
#include<stdint.h>
#include<assert.h>
#include<time.h>
#include<unistd.h>
#include<cuda_runtime.h>
#include<cublas_v2.h>
#include<thrust/sort.h>
#include"device_launch_parameters.h"
#include<helper_cuda.h>
#include<helper_functions.h>
#define MAX_DEPTH 16
#define INSERTION_SORT 32
using namespace cv;
__device__ void Memcpy(double *im, double *data, int row, int col, int r_c, int r_g, int n);
__device__ void selection_sort(double *data, int left, int right);
__device__ void simple_quicksort(double *data, int left, int right, int depth);
__global__ void lognormal_mixture(double *im, int r_c, int r_g, int k, double Pf, int m, int n)
{
/***********************************************************************
Ship detection based on lognormal mixture models
INPUT
im: padding SAR density image
r_c: radius of the reference window
r_g: radius of the guard area
K :number of components
Pf: false alarm rate
m : number of rows of input image
n : number of columns of input image
OUTPUT
im_prob: the cdf of simulate distribution with the im value
*************************************************************************/
int i = threadIdx.x + blockDim.x * blockIdx.x;
int j = threadIdx.y + blockDim.y * blockIdx.y;
int size = r_c*r_c - r_g*r_g;
for(int i = 0;i<size;i++)
{
// data[i] = 1.0;
// printf("%.1f\n", data[i]);
}
}
__global__ void malloc_global(double **a)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
int j = threadIdx.y + blockDim.y * blockIdx.y;
if(i==10&&j==10)
{
int N = 10000;
printf("ok");
cudaMalloc((void**)a, sizeof(double)*N);
for(int i=0;i<N;i++)
{
(*a)[i] = i;
}
}
__syncthreads();
if(i==11&&j==11)
{
printf("%f\n",(*a)[500]);
}
}
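// CFAR detector: each thread handles one pixel of the original (unpadded) image.
// It gathers the clutter ring between the guard area (radius r_g) and the
// reference window (radius r_c) from the padded image `im`, sorts it, estimates
// the clutter level I_C from the lowest 65% of the samples, averages the 3x3
// target window to get I, and writes the ratio I/I_C into T.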
__global__ void CFAR_Gamma(double *im, double *T, int r_c, int r_g, int m, int n) {
// n_pad is the number of columns of the padded image, n is the number of columns of the original image
int row = threadIdx.x + blockDim.x * blockIdx.x;
int col = threadIdx.y + blockDim.y * blockIdx.y;
int size = (r_c*r_c-r_g*r_g)*4;int n_pad = n + 2*r_c;
double clutter_sum = 0, I_C = 0, I = 0, *clutter;
__shared__ double data[4600];
if(row < m && col < n)
{
int index = threadIdx.x + threadIdx.y*blockDim.x;
row = row + r_c; col = col + r_c; // index positions shift because of the padding border
clutter = &data[index*size];
Memcpy(im, clutter, row, col, r_c, r_g, n_pad);
simple_quicksort(clutter, 0, size-1, 0);
int number = size * 0.65;
for(int i = 0; i< number; i++)
{
clutter_sum += clutter[i];
}
I_C = clutter_sum/number;
for(int i = row-1; i <= row+1;i++)
{
for(int j = col-1;j <= col+1;j++)
{
I += im[i*n_pad+j]; // average over the 3x3 target window
}
}
I = I/9;
T[(row-r_c)*n+(col-r_c)] = I/I_C;
// double *a;
// cudaMalloc((void **)&a,sizeof(double)*1000);
// for(int i=0;i<1000;i++)
// {
// a[i] = i;
// if(i==50)
// {printf("ok");printf("%f ", a[i]);}
// }
if(row==30&&col==30)
{
// for(int i=0;i<size;i++)
// {
// printf("%f ", clutter[i]);
// }
// printf("ok");
}
// cudaFree(a);
}
}
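// Copies the clutter ring around (row, col) -- the reference window minus the
// guard area -- from the padded image into `data`, one band at a time
// (upper, lower, left, right).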
__device__ void Memcpy(double *im, double *data, int row, int col, int r_c, int r_g, int n)
{
// upper clutter block, 5x30
int index = 0;
for(int i = row-r_c;i<row-r_g;i++)
{
for(int j=col-r_c;j<=col+r_c;j++)
{
data[index] = im[i*n+j];
index += 1;
}
}
// lower clutter block, 5x30
for(int i = row+r_g+1;i<=row+r_c;i++)
{
for(int j=col-r_c;j<=col+r_c;j++)
{
data[index] = im[i*n+j];
index += 1;
}
}
// left clutter block, 20x5
for(int i = row-r_g;i<=row+r_g;i++)
{
for(int j = col-r_c;j<col-r_g;j++)
{
data[index] = im[i*n+j];
index += 1;
}
}
// right clutter block, 20x5
for(int i = row-r_g;i<=row+r_g;i++)
{
for(int j = col+r_g+1;j<=col+r_c;j++)
{
data[index] = im[i*n+j];
index += 1;
}
}
}
__device__ void selection_sort(double *data, int left, int right)
{
for(int i = left; i <= right; i++)
{
double min_val = data[i];
int min_idx = i;
for(int j = i+1; j <= right; j++)
{
double val_j = data[j];
if(val_j < min_val)
{
min_idx = j;
min_val = val_j;
}
}
if(i != min_idx)
{
data[min_idx] = data[i];
data[i] = min_val;
}
}
}
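// Device-side quicksort that falls back to selection sort once the recursion
// depth exceeds MAX_DEPTH or the partition shrinks below INSERTION_SORT elements.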
__device__ void simple_quicksort(double *data, int left, int right, int depth)
{
if (depth >= MAX_DEPTH || right-left <= INSERTION_SORT)
{
selection_sort(data, left, right);
return;
}
double *lptr = &data[left];
double *rptr = &data[right];
double pivot = data[(left+right)/2];
while(lptr <= rptr)
{
double lval = *lptr;
double rval = *rptr;
while(lval < pivot)
{
lptr++;
lval = *lptr;
}
while(rval > pivot)
{
rptr--;
rval = *rptr;
}
if(lptr <= rptr)
{
*lptr++ = rval;
*rptr-- = lval;
}
}
int nright = rptr - data;
int nleft = lptr - data;
if (left < (rptr-data))
{
simple_quicksort(data, left, nright, depth+1);
}
// Launch a new block to sort the right part.
if ((lptr-data) < right)
{
simple_quicksort(data, nleft, right, depth+1);
}
}
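// Command-line options: -d <file> input data path, -c/--rc reference window
// radius, -g/--rg guard area radius.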
int main(int argc, char *argv[])
{
double **im, *im_pad, *im_dev, *T, *result, threshold;
int ch, opt_index, channels,m,n; // opt_index is the index of the matched option in long_options
const char *optstring = "d:c:g:";
int r_c = 15, r_g = 10;threshold = 4.7;
dim3D arraydim;
const char *filename = "../data/data.bin";
clock_t start,end;
start = clock();
static struct option long_options[] = {
{"rc", required_argument, NULL,'c'},
{"rg", required_argument, NULL,'g'}
};
while((ch = getopt_long(argc, argv, optstring, long_options, &opt_index)) != -1)
{
switch(ch)
{
case 'd':
filename = optarg; break;
case 'c':
r_c = atoi(optarg); break;
case 'g':
r_g = atoi(optarg); break;
case '?':
cout<<"Unknown option: "<<(char)optopt<<endl;
break;
}
}
int deviceCount;
cudaGetDeviceCount(&deviceCount);
for(int i=0;i<deviceCount;i++)
{
cudaDeviceProp devProp;
cudaGetDeviceProperties(&devProp,i);
cout << "GPU device:" << i << ": " << devProp.name <<endl;
cout << "global memory: " << devProp.totalGlobalMem / 1024 / 1024 << "MB" <<endl;
cout << "SM number:" << devProp.multiProcessorCount <<endl;
cout << "shared memory:" << (devProp.sharedMemPerBlock / 1024.0) <<"KB"<<endl;
cout << "block max_thread:" << devProp.maxThreadsPerBlock <<endl;
cout << "registers per Block:" << devProp.regsPerBlock <<endl;
cout << "SM max theads:" << devProp.maxThreadsPerMultiProcessor <<endl;
printf("GPU device has compute capabilities (SM %d.%d)\n", devProp.major, devProp.minor);
cout << "======================================================" <<endl;
}
ifstream infile(filename, ios::in | ios::binary);
infile.read((char *)&channels, sizeof(int));
infile.read((char *)&arraydim.m,sizeof(size_t));
infile.read((char *)&arraydim.n,sizeof(size_t));
m = (int)arraydim.m; n = (int)arraydim.n;
im = new double *[m];
for(int i=0;i<m;i++)
{
im[i] = new double[n];
for(int j=0;j<n;j++)
{
infile.read((char *)&im[i][j], sizeof(double));
}
}
Mat image = ArrayToImage(im, arraydim);
Mat origin_image = ArrayToMat(im, arraydim);
// double data[3][3] = { {1,2,3},{4,5,6},{7,8,9} };
Mat pad_image = PadArray(origin_image,r_c,r_c);
im_pad = pad_image.ptr<double>(0);
int row_pad = pad_image.rows;int col_pad = pad_image.cols;
dim3 blockdim(3,3);
dim3 griddim((m+blockdim.x-1)/blockdim.x , (n+blockdim.y-1)/blockdim.y);
checkCudaErrors(cudaMalloc((void**)&im_dev, sizeof(double)*row_pad*col_pad));
checkCudaErrors(cudaMalloc((void**)&T, sizeof(double)*m*n));
checkCudaErrors(cudaMemcpy(im_dev, im_pad, sizeof(double)*row_pad*col_pad, cudaMemcpyHostToDevice));
result = new double[m*n];
cudaStream_t detect;
cudaStreamCreate(&detect);
CFAR_Gamma<<<griddim, blockdim, 0, detect>>>(im_dev, T, r_c, r_g, m, n); // should pass the unpadded image dimensions
// double **a;
// checkCudaErrors(cudaMalloc((void**)&a, sizeof(double *)));
// griddim.x = 6;griddim.y = 6;
// malloc_global<<<griddim,blockdim,0>>>(a);
cudaStreamSynchronize(detect);
checkCudaErrors(cudaMemcpy(result, T, sizeof(double)*m*n, cudaMemcpyDeviceToHost));
Mat detect_result = Mat::zeros(m, n, CV_8UC1);
for(int i = 0;i<m;i++)
{
for(int j = 0;j<n;j++)
{
if(result[i*n+j]>threshold)
detect_result.at<uchar>(i,j) = (unsigned char)255;
else
detect_result.at<uchar>(i,j) = (unsigned char)0;
}
}
cudaStreamDestroy(detect);
end = clock();
imshow("origin" , image);
imshow("detected" , detect_result);
while(char(waitKey())!='q')
{
}
// FreeDoubleArray(im,arraydim);
image.release();
cout<<"GPU time: "<<(float)(end-start)/CLOCKS_PER_SEC<<" s"<<endl;
return 0 ;
}
|
efc0ecece10ce925d3a28c61023a165b112e1032.hip | // !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<32>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<32>;
using ThreadBlockShape = cutlass::gemm::GemmShape<128, 256, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp<
int8_t, 8, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutSrc, int32_t, LayoutSrc, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 16, 16, true>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const int8_t* d_src,
const int8_t* d_filter,
const int32_t* d_bias,
const int8_t* d_z,
int8_t* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream);
#pragma GCC diagnostic pop
#endif
| efc0ecece10ce925d3a28c61023a165b112e1032.cu | #if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<32>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<32>;
using ThreadBlockShape = cutlass::gemm::GemmShape<128, 256, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp<
int8_t, 8, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutSrc, int32_t, LayoutSrc, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 16, 16, true>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const int8_t* d_src,
const int8_t* d_filter,
const int32_t* d_bias,
const int8_t* d_z,
int8_t* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
7f06a19ff393ae228e1c88652b757f814f12a5e5.hip | // !!! This is a file automatically generated by hipify!!!
#include "labels.h"
namespace kmeans {
namespace detail {
struct cublas_state {
hipblasHandle_t cublas_handle;
cublas_state() {
hipblasStatus_t stat;
stat = hipblasCreate(&cublas_handle);
if (stat != HIPBLAS_STATUS_SUCCESS) {
std::cout << "CUBLAS initialization failed" << std::endl;
exit(1);
}
}
~cublas_state() {
hipblasStatus_t stat;
stat = hipblasDestroy(cublas_handle);
if (stat != HIPBLAS_STATUS_SUCCESS) {
std::cout << "CUBLAS destruction failed" << std::endl;
exit(1);
}
}
};
cublas_state state;
void gemm(hipblasOperation_t transa, hipblasOperation_t transb,
int m, int n, int k, const float *alpha,
const float *A, int lda, const float *B, int ldb,
const float *beta,
float *C, int ldc) {
hipblasStatus_t status = hipblasSgemm(state.cublas_handle, transa, transb,
m, n, k, alpha,
A, lda, B, ldb,
beta,
C, ldc);
if (status != HIPBLAS_STATUS_SUCCESS) {
std::cout << "Invalid Sgemm" << std::endl;
exit(1);
}
}
void gemm(hipblasOperation_t transa, hipblasOperation_t transb,
int m, int n, int k, const double *alpha,
const double *A, int lda, const double *B, int ldb,
const double *beta,
double *C, int ldc) {
hipblasStatus_t status = hipblasDgemm(state.cublas_handle, transa, transb,
m, n, k, alpha,
A, lda, B, ldb,
beta,
C, ldc);
if (status != HIPBLAS_STATUS_SUCCESS) {
std::cout << "Invalid Dgemm" << std::endl;
exit(1);
}
}
}
}
| 7f06a19ff393ae228e1c88652b757f814f12a5e5.cu | #include "labels.h"
namespace kmeans {
namespace detail {
struct cublas_state {
cublasHandle_t cublas_handle;
cublas_state() {
cublasStatus_t stat;
stat = cublasCreate(&cublas_handle);
if (stat != CUBLAS_STATUS_SUCCESS) {
std::cout << "CUBLAS initialization failed" << std::endl;
exit(1);
}
}
~cublas_state() {
cublasStatus_t stat;
stat = cublasDestroy(cublas_handle);
if (stat != CUBLAS_STATUS_SUCCESS) {
std::cout << "CUBLAS destruction failed" << std::endl;
exit(1);
}
}
};
cublas_state state;
void gemm(cublasOperation_t transa, cublasOperation_t transb,
int m, int n, int k, const float *alpha,
const float *A, int lda, const float *B, int ldb,
const float *beta,
float *C, int ldc) {
cublasStatus_t status = cublasSgemm(state.cublas_handle, transa, transb,
m, n, k, alpha,
A, lda, B, ldb,
beta,
C, ldc);
if (status != CUBLAS_STATUS_SUCCESS) {
std::cout << "Invalid Sgemm" << std::endl;
exit(1);
}
}
void gemm(cublasOperation_t transa, cublasOperation_t transb,
int m, int n, int k, const double *alpha,
const double *A, int lda, const double *B, int ldb,
const double *beta,
double *C, int ldc) {
cublasStatus_t status = cublasDgemm(state.cublas_handle, transa, transb,
m, n, k, alpha,
A, lda, B, ldb,
beta,
C, ldc);
if (status != CUBLAS_STATUS_SUCCESS) {
std::cout << "Invalid Dgemm" << std::endl;
exit(1);
}
}
}
}
|
31de038ad2d9e44b2dfa5388bf696bf1ac0bfc9c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*** Written by Salil Deosthale 11/30/2012 ***/
#include "features.cuh"
#include <limits>
#include <iostream>
namespace nscale {
namespace gpu {
using namespace cv::gpu;
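// One block per connected component: threads stride over the columns of the
// component's bounding box, count pixels whose label matches the component,
// and reduce the per-thread counts in shared memory into areaRes[blockIdx.x].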
__global__ void area(const int *boundingBoxInfo, int compCount, const cv::gpu::PtrStep_<int> labeledMask, int *areaRes)
{
//Declare a shared array "thread_area[NumThreads]". This will hold the value of the area each thread walks through
__shared__ int thread_area[32];
//Zero out the thread_area array
thread_area[threadIdx.x] = 0;
//Pointer to a row of the image (I NEED TO CHANGE THIS.SHOULD I MAKE THIS SHARED?)
const int *labeledImgPtr;
//Label of this current component
int label = boundingBoxInfo[blockIdx.x];
int maxX = boundingBoxInfo[2 * compCount + blockIdx.x];
int maxY = boundingBoxInfo[4 * compCount + blockIdx.x];
for(int x = boundingBoxInfo[compCount + blockIdx.x] +threadIdx.x; x <= maxX ; x+=blockDim.x)
{
for(int y = boundingBoxInfo[3 * compCount + blockIdx.x] ; y <= maxY ; y++)
{
labeledImgPtr = labeledMask.ptr(y);
thread_area[threadIdx.x] += (labeledImgPtr[x] == label ? 1 : 0);
}
}
__syncthreads();
//Now, we do a parallel reduction using sequential addressing
unsigned int s;
for(s = blockDim.x /2 ; s > 0 ; s >>= 1)
{
if(threadIdx.x < s)
{
thread_area[threadIdx.x] += thread_area[threadIdx.x + s];
}
__syncthreads();
}
if (threadIdx.x == 0) areaRes[blockIdx.x] = thread_area[0];
}
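// Marching-squares style perimeter estimate: every 2x2 neighbourhood inside
// (and on the border of) the component's bounding box is encoded as a 4-bit
// occupancy mask and mapped through `lookup` to its local boundary-length
// contribution; the per-thread sums are then reduced in shared memory.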
__global__ void perimeter(const int *boundingBoxInfo, int compCount, const cv::gpu::PtrStep_<int> labeledMask, float *perimeterRes)
{
//Declare a shared array called 'lookup'. This will hold the lookup table. Each block will have this lookup table in its shared memory
__shared__ float lookup[16];
lookup[8] = 0.70710678118;
lookup[4] = 0.70710678118;
lookup[2] = 0.70710678118;
lookup[1] = 0.70710678118;
lookup[3] = 1.0;
lookup[6] = 1.0;
lookup[9] = 1.0;
lookup[12] = 1.0;
lookup[7] = 0.70710678118;
lookup[11] = 0.70710678118;
lookup[13] = 0.70710678118;
lookup[14] = 0.70710678118;
lookup[10] = 1.41421356237;
lookup[5] = 1.41421356237;
lookup[0] = 0.0;
lookup[15] = 0.0;
//------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
//Declare shared array for the perimeter that each thread encounters. Initialize it by zeroing it out.
__shared__ float thread_perimeter[32];
thread_perimeter[threadIdx.x] = 0.0;
//Declare a shared mask array for each block.
__shared__ int mask[32];
//------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
//Label of current component, maxX and maxY of current bounding box
int label = boundingBoxInfo[blockIdx.x];
int minX = boundingBoxInfo[compCount + blockIdx.x];
int minY = boundingBoxInfo[3 * compCount + blockIdx.x];
int maxX = boundingBoxInfo[2 * compCount + blockIdx.x];
int maxY = boundingBoxInfo[4 * compCount + blockIdx.x];
//------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
//Walk through the centre of the bounding box. From xmin to xmax-1
for(int x = minX + threadIdx.x ; x < maxX ; x+=blockDim.x)
{
for(int y = minY ; y < maxY ; y++)
{
mask[threadIdx.x] = 0;
mask[threadIdx.x] = (labeledMask.ptr(y)[x] == label); //(0,0)
mask[threadIdx.x] = (mask[threadIdx.x] << 1) | (labeledMask.ptr(y)[x+1] == label );//(1,0)
mask[threadIdx.x] = (mask[threadIdx.x] << 1) | (labeledMask.ptr(y+1)[x+1] == label );//(1,1)
mask[threadIdx.x] = (mask[threadIdx.x] << 1) | (labeledMask.ptr(y+1)[x] == label );//(0,1)
thread_perimeter[threadIdx.x] += lookup[mask[threadIdx.x]];
}
}
//Walk through the top and bottom edges of the bounding box.
for(int x = minX + threadIdx.x ; x < maxX ; x+=blockDim.x)
{
//Top row : Read->leftshift->read->leftshiftby2
mask[threadIdx.x] = 0;
mask[threadIdx.x] = (labeledMask.ptr(maxY)[x] == label);
mask[threadIdx.x] = (mask[threadIdx.x] << 1) | (labeledMask.ptr(maxY)[x+1] == label);
thread_perimeter[threadIdx.x] += lookup[mask[threadIdx.x]];
//Bottom row : leftshiftby2->read->leftshift->read
mask[threadIdx.x] = 0;
mask[threadIdx.x] = (labeledMask.ptr(minY)[x+1] == label);
mask[threadIdx.x] = (mask[threadIdx.x] << 1) | (labeledMask.ptr(minY)[x] == label);
thread_perimeter[threadIdx.x] += lookup[mask[threadIdx.x]];
}
//Walk through the left and right edges of the bounding box
for(int y = minY + threadIdx.x ; y < maxY ; y+=blockDim.x)
{
//Left edge : leftshift->read->leftshift->read->leftshift
mask[threadIdx.x] = 0;
mask[threadIdx.x] = (labeledMask.ptr(y)[minX] == label);
mask[threadIdx.x] = (mask[threadIdx.x] << 1) | (labeledMask.ptr(y+1)[minX] == label);
thread_perimeter[threadIdx.x] += lookup[mask[threadIdx.x]];
//Right edge : read->leftshiftby3->read
mask[threadIdx.x] = 0;
mask[threadIdx.x] = (labeledMask.ptr(y)[maxX] == label);
mask[threadIdx.x] = (mask[threadIdx.x] << 1) | (labeledMask.ptr(y+1)[maxX] == label);
thread_perimeter[threadIdx.x] += lookup[mask[threadIdx.x]];
}
//Corners
if(threadIdx.x == 0) //Bottom left corner (0,0)
{
mask[threadIdx.x] = 0;
mask[threadIdx.x] = (labeledMask.ptr(minY)[minX] == label);
thread_perimeter[threadIdx.x] += lookup[mask[threadIdx.x]];
}
if(threadIdx.x == 8) //Bottom right corner (1,0)
{
mask[threadIdx.x] = 0;
mask[threadIdx.x] = (labeledMask.ptr(minY)[maxX] == label);
thread_perimeter[threadIdx.x] += lookup[mask[threadIdx.x]];
}
if(threadIdx.x == 16) // Top right corner (1,1)
{
mask[threadIdx.x] = 0;
mask[threadIdx.x] = (labeledMask.ptr(maxY)[maxX] == label);
thread_perimeter[threadIdx.x] += lookup[mask[threadIdx.x]];
}
if(threadIdx.x == 24) // Top left corner (0,1)
{
mask[threadIdx.x] = 0;
mask[threadIdx.x] = (labeledMask.ptr(maxY)[minX] == label);
thread_perimeter[threadIdx.x] += lookup[mask[threadIdx.x]];
}
__syncthreads();
//Now, we do a parallel reduction using sequential addressing
unsigned int s;
for(s = blockDim.x /2 ; s > 0 ; s >>= 1)
{
if(threadIdx.x < s)
{
thread_perimeter[threadIdx.x] += thread_perimeter[threadIdx.x + s];
}
__syncthreads();
}
if (threadIdx.x == 0) perimeterRes[blockIdx.x] = thread_perimeter[0];
}
__global__ void ellipse(const int* boundingBoxInfo , const int compCount , const cv::gpu::PtrStep_ <int> labeledMask , int *areaRes , float *majorAxis , float* minorAxis , float* ecc)
{
//Create shared arrays for sx , sy , sxy , ssqx , ssqy
__shared__ float sx[32]; sx[threadIdx.x] = 0;
__shared__ float sy[32]; sy[threadIdx.x] = 0;
__shared__ float sxy[32]; sxy[threadIdx.x] = 0;
__shared__ float ssqx[32]; ssqx[threadIdx.x] = 0;
__shared__ float ssqy[32]; ssqy[threadIdx.x] = 0;
//------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
//fix the parameters according to the blockId
int label = boundingBoxInfo[blockIdx.x];
int minX = boundingBoxInfo[compCount + blockIdx.x];
int maxX = boundingBoxInfo[2 * compCount + blockIdx.x];
float midX = (float)(minX+maxX)/2.0;
int minY = boundingBoxInfo[3 * compCount + blockIdx.x];
int maxY = boundingBoxInfo[4 * compCount + blockIdx.x];
float midY = (float)(minY+maxY)/2.0;
//------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
//Walk through the labeled Image
for( int x = minX + threadIdx.x ; x <= maxX ; x+=blockDim.x)
{
float cx = (float)x - midX;
for( int y = minY ; y <=maxY ; y++)
{
float cy = (float)y - midY;
bool temp = (labeledMask.ptr(y))[x] == label;
sx[threadIdx.x] += ( temp ? cx : 0);
sy[threadIdx.x] += ( temp ? cy : 0);
sxy[threadIdx.x] += ( temp ? cx*cy : 0);
ssqx[threadIdx.x] += ( temp ? cx*cx : 0);
ssqy[threadIdx.x] += ( temp ? cy*cy : 0);
}
}
__syncthreads();
//------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
//Now do a parallel reduction to complete the summation
unsigned int s;
for(s = blockDim.x /2 ; s > 0 ; s >>= 1)
{
if(threadIdx.x < s)
{
sx[threadIdx.x] += sx[threadIdx.x + s];
sy[threadIdx.x] += sy[threadIdx.x + s];
sxy[threadIdx.x] += sxy[threadIdx.x + s];
ssqx[threadIdx.x] += ssqx[threadIdx.x + s];
ssqy[threadIdx.x] += ssqy[threadIdx.x + s];
}
__syncthreads();
}
//------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
//Now do the remaining calculations
if (threadIdx.x == 0)
{
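//The block below is the usual ellipse fit from normalized second central moments (the same
//formulas used by e.g. MATLAB's regionprops): the 1.0/12.0 term corrects for the unit extent of
//each pixel, 2*sqrt(2)*sqrt(mxx+myy+/-delta) -- written here as sqrtf(8.0)*sqrtf(...) -- gives the
//axis lengths of an ellipse with matching second moments, and ecc equals sqrt(1 - (minor/major)^2).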
float frac = 1.0 / 12.0;
float root = sqrtf(8.0);
float area = (float)areaRes[blockIdx.x];
float xbar = (float)sx[0] / area;
float ybar = (float)sy[0] / area;
float mxx = (float)ssqx[0]/area - xbar*xbar+ frac;
float myy = (float)ssqy[0]/area - ybar*ybar+ frac;
float mxy = (float)sxy[0]/area - xbar*ybar;
float delta = sqrtf((mxx-myy)*(mxx-myy) + 4.0 * mxy * mxy); //discriminant = sqrt(b*b-4*a*c)
majorAxis[blockIdx.x] = root*sqrtf(mxx+myy+delta);
minorAxis[blockIdx.x] = root*sqrtf(mxx+myy-delta);
ecc[blockIdx.x] = sqrtf(majorAxis[blockIdx.x] * majorAxis[blockIdx.x] - minorAxis[blockIdx.x] * minorAxis[blockIdx.x])/majorAxis[blockIdx.x];
}
}
__global__ void big_features(const int *boundingBoxInfo, int compCount, const cv::gpu::PtrStep_<int> labeledMask, int *areaRes, float* perimeterRes , float* majorAxis , float* minorAxis , float* ecc)
{
/*****************************************FIRST DECLARE THE SHARED ARRAYS FOR ALL THE FEATURES****************************************************/
__shared__ int thread_area[32]; //shared array for thread area
thread_area[threadIdx.x] = 0;
__shared__ float lookup[16]; //shared array lookup table for thread perimeter
lookup[8] = 0.70710678118;
lookup[4] = 0.70710678118;
lookup[2] = 0.70710678118;
lookup[1] = 0.70710678118;
lookup[3] = 1.0;
lookup[6] = 1.0;
lookup[9] = 1.0;
lookup[12] = 1.0;
lookup[7] = 0.70710678118;
lookup[11] = 0.70710678118;
lookup[13] = 0.70710678118;
lookup[14] = 0.70710678118;
lookup[10] = 1.41421356237;
lookup[5] = 1.41421356237;
lookup[0] = 0.0;
lookup[15] = 0.0;
__shared__ float thread_perimeter[32]; //shared array for thread perimeter
thread_perimeter[threadIdx.x] = 0.0;
__shared__ int mask[32]; //shared array mask for perimeter
__shared__ float sx[32]; sx[threadIdx.x] = 0; //shared arrays for ellipse calculations
__shared__ float sy[32]; sy[threadIdx.x] = 0;
__shared__ float sxy[32]; sxy[threadIdx.x] = 0;
__shared__ float ssqx[32]; ssqx[threadIdx.x] = 0;
__shared__ float ssqy[32]; ssqy[threadIdx.x] = 0;
/******************************************NOW DECLARE ALL THE DETAILS RELATED TO BOUNDING BOX**********************************************/
int label = boundingBoxInfo[blockIdx.x];
int minX = boundingBoxInfo[compCount + blockIdx.x];
int maxX = boundingBoxInfo[2 * compCount + blockIdx.x];
float midX = (float)(minX+maxX)/2.0;
int minY = boundingBoxInfo[3 * compCount + blockIdx.x];
int maxY = boundingBoxInfo[4 * compCount + blockIdx.x];
float midY = (float)(minY+maxY)/2.0;
/******************************************NOW WALK THROUGH THE IMAGE FOR ONE IMAGE AT A TIME**************************************************/
///////////////////////////////////////////////////////////////////////////AREA//////////////////////////////////////////////////////////////////
for(int x = minX +threadIdx.x; x <= maxX ; x+=blockDim.x)
{
for(int y = minY ; y <= maxY ; y++)
{
thread_area[threadIdx.x] += ((labeledMask.ptr(y))[x] == label ? 1 : 0);
}
}
__syncthreads();
/////////////////////////////////////////////////////////////////////////PERIMETER//////////////////////////////////////////////////////////////
//Walk through the centre of the bounding box. From xmin to xmax-1
for(int x = minX + threadIdx.x ; x < maxX ; x+=blockDim.x)
{
for(int y = minY ; y < maxY ; y++)
{
mask[threadIdx.x] = 0;
mask[threadIdx.x] = (labeledMask.ptr(y)[x] == label); //(0,0)
mask[threadIdx.x] = (mask[threadIdx.x] << 1) | (labeledMask.ptr(y)[x+1] == label );//(1,0)
mask[threadIdx.x] = (mask[threadIdx.x] << 1) | (labeledMask.ptr(y+1)[x+1] == label );//(1,1)
mask[threadIdx.x] = (mask[threadIdx.x] << 1) | (labeledMask.ptr(y+1)[x] == label );//(0,1)
thread_perimeter[threadIdx.x] += lookup[mask[threadIdx.x]];
}
}
//Walk through the top and bottom edges of the bounding box.
for(int x = minX + threadIdx.x ; x < maxX ; x+=blockDim.x)
{
//Top row : Read->leftshift->read->leftshiftby2
mask[threadIdx.x] = 0;
mask[threadIdx.x] = (labeledMask.ptr(maxY)[x] == label);
mask[threadIdx.x] = (mask[threadIdx.x] << 1) | (labeledMask.ptr(maxY)[x+1] == label);
thread_perimeter[threadIdx.x] += lookup[mask[threadIdx.x]];
//Bottom row : leftshiftby2->read->leftshift->read
mask[threadIdx.x] = 0;
mask[threadIdx.x] = (labeledMask.ptr(minY)[x+1] == label);
mask[threadIdx.x] = (mask[threadIdx.x] << 1) | (labeledMask.ptr(minY)[x] == label);
thread_perimeter[threadIdx.x] += lookup[mask[threadIdx.x]];
}
//Walk through the left and right edges of the bounding box
for(int y = minY + threadIdx.x ; y < maxY ; y+=blockDim.x)
{
//Left edge : leftshift->read->leftshift->read->leftshift
mask[threadIdx.x] = 0;
mask[threadIdx.x] = (labeledMask.ptr(y)[minX] == label);
mask[threadIdx.x] = (mask[threadIdx.x] << 1) | (labeledMask.ptr(y+1)[minX] == label);
thread_perimeter[threadIdx.x] += lookup[mask[threadIdx.x]];
//Right edge : read->leftshiftby3->read
mask[threadIdx.x] = 0;
mask[threadIdx.x] = (labeledMask.ptr(y)[maxX] == label);
mask[threadIdx.x] = (mask[threadIdx.x] << 1) | (labeledMask.ptr(y+1)[maxX] == label);
thread_perimeter[threadIdx.x] += lookup[mask[threadIdx.x]];
}
//Corners
if(threadIdx.x == 0) //Bottom left corner (0,0)
{
mask[threadIdx.x] = 0;
mask[threadIdx.x] = (labeledMask.ptr(minY)[minX] == label);
thread_perimeter[threadIdx.x] += lookup[mask[threadIdx.x]];
}
if(threadIdx.x == 8) //Bottom right corner (1,0)
{
mask[threadIdx.x] = 0;
mask[threadIdx.x] = (labeledMask.ptr(minY)[maxX] == label);
thread_perimeter[threadIdx.x] += lookup[mask[threadIdx.x]];
}
if(threadIdx.x == 16) // Top right corner (1,1)
{
mask[threadIdx.x] = 0;
mask[threadIdx.x] = (labeledMask.ptr(maxY)[maxX] == label);
thread_perimeter[threadIdx.x] += lookup[mask[threadIdx.x]];
}
if(threadIdx.x == 24) // Top left corner (0,1)
{
mask[threadIdx.x] = 0;
mask[threadIdx.x] = (labeledMask.ptr(maxY)[minX] == label);
thread_perimeter[threadIdx.x] += lookup[mask[threadIdx.x]];
}
__syncthreads();
/////////////////////////////////////////////////////////ELLIPSE/////////////////////////////////////////////////////////////////////////
//Walk through the labeled Image
for( int x = minX + threadIdx.x ; x <= maxX ; x+=blockDim.x)
{
float cx = (float)x - midX;
for( int y = minY ; y <=maxY ; y++)
{
float cy = (float)y - midY;
bool temp = (labeledMask.ptr(y))[x] == label;
sx[threadIdx.x] += ( temp ? cx : 0);
sy[threadIdx.x] += ( temp ? cy : 0);
sxy[threadIdx.x] += ( temp ? cx*cy : 0);
ssqx[threadIdx.x] += ( temp ? cx*cx : 0);
ssqy[threadIdx.x] += ( temp ? cy*cy : 0);
}
}
__syncthreads();
/************************************************PARALLEL REDUCTION OVER ALL THE SHARED ARRAYS********************************************************/
unsigned int s;
for(s = blockDim.x /2 ; s > 0 ; s >>= 1)
{
if(threadIdx.x < s)
{
//Area
thread_area[threadIdx.x] += thread_area[threadIdx.x + s];
//Perimeter
thread_perimeter[threadIdx.x] += thread_perimeter[threadIdx.x + s];
//Ellipse
sx[threadIdx.x] += sx[threadIdx.x + s];
sy[threadIdx.x] += sy[threadIdx.x + s];
sxy[threadIdx.x] += sxy[threadIdx.x + s];
ssqx[threadIdx.x] += ssqx[threadIdx.x + s];
ssqy[threadIdx.x] += ssqy[threadIdx.x + s];
}
__syncthreads();
}
/*********************************************************************CONSOLIDATE!!!!*******************************************************************/
if (threadIdx.x == 0)
{
//Area
areaRes[blockIdx.x] = thread_area[0];
//Perimeter
perimeterRes[blockIdx.x] = thread_perimeter[0];
//Ellipse
float frac = 1.0 / 12.0;
float root = sqrtf(8.0);
float area = (float)areaRes[blockIdx.x];
float xbar = (float)sx[0] / area;
float ybar = (float)sy[0] / area;
float mxx = (float)ssqx[0]/area - xbar*xbar+ frac;
float myy = (float)ssqy[0]/area - ybar*ybar+ frac;
float mxy = (float)sxy[0]/area - xbar*ybar;
float delta = sqrtf((mxx-myy)*(mxx-myy) + 4.0 * mxy * mxy); //discriminant = sqrt(b*b-4*a*c)
majorAxis[blockIdx.x] = root*sqrtf(mxx+myy+delta);
minorAxis[blockIdx.x] = root*sqrtf(mxx+myy-delta);
ecc[blockIdx.x] = sqrtf(majorAxis[blockIdx.x] * majorAxis[blockIdx.x] - minorAxis[blockIdx.x] * minorAxis[blockIdx.x])/majorAxis[blockIdx.x];
}
}
__global__ void extentratio(const int *boundingBoxInfo , const int compCount , const int *areaRes , float *extent_ratio)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if(tid < compCount)
{
int xmin = boundingBoxInfo[compCount + tid];
int xmax = boundingBoxInfo[2 * compCount + tid];
int ymin = boundingBoxInfo[3 * compCount + tid];
int ymax = boundingBoxInfo[4 * compCount + tid];
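//Extent ratio = component area / bounding-box area; the +1 terms make the box dimensions inclusive of both end pixels.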
extent_ratio[tid] = (float)areaRes[tid] / (float)((xmax-xmin+1) * (ymax-ymin+1));
}
}
__global__ void circularity(const int compCount , const int *areaRes , const float *perimeterRes , float *circ)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if(tid < compCount)
{
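//Isoperimetric circularity 4*pi*A/P^2: equals 1 for a perfect disc and decreases as the shape becomes less compact.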
circ[tid] = (4.0 * 3.14159265359 * (float)areaRes[tid]) / (perimeterRes[tid] * perimeterRes[tid]) ;
}
}
__global__ void small_features(const int *boundingBoxInfo , const int compCount , const int *areaRes , const float *perimeterRes , float *extent_ratio , float *circ)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if( tid < compCount)
{
int xmin = boundingBoxInfo[compCount + tid];
int xmax = boundingBoxInfo[2 * compCount + tid];
int ymin = boundingBoxInfo[3 * compCount + tid];
int ymax = boundingBoxInfo[4 * compCount + tid];
extent_ratio[tid] = (float)areaRes[tid] / (float)((xmax-xmin+1) * (ymax-ymin+1));
circ[tid] = (4.0 * 3.14159265359 * (float)areaRes[tid]) / (perimeterRes[tid] * perimeterRes[tid]);
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void AreaCaller(const int* boundingBoxInfo , const int compCount ,const cv::gpu::PtrStep_<int> labeledMask, int *areaRes, hipStream_t stream)
{
dim3 threads(32,1);
dim3 grid(compCount, 1);
hipLaunchKernelGGL(( area), dim3(grid),dim3(threads),0,stream, boundingBoxInfo, compCount, labeledMask,areaRes);
hipGetLastError();
if(stream == 0)
hipDeviceSynchronize();
}
void PerimeterCaller(const int* boundingBoxInfo , const int compCount ,const cv::gpu::PtrStep_<int> labeledMask , float *perimeterRes, hipStream_t stream)
{
dim3 threads(32,1);
dim3 grid(compCount,1);
hipLaunchKernelGGL(( perimeter) , dim3(grid), dim3(threads), 0 ,stream , boundingBoxInfo, compCount , labeledMask, perimeterRes);
hipGetLastError();
if(stream == 0)
hipDeviceSynchronize();
}
void EllipseCaller(const int* boundingBoxInfo , const int compCount , const cv::gpu::PtrStep_ <int> labeledMask , int *areaRes , float *majorAxis , float *minorAxis , float *ecc, hipStream_t stream)
{
dim3 threads(32,1);
dim3 grid(compCount , 1);
hipLaunchKernelGGL(( ellipse) , dim3(grid) , dim3(threads) , 0 , stream , boundingBoxInfo , compCount , labeledMask , areaRes , majorAxis , minorAxis , ecc);
hipGetLastError();
if(stream == 0)
hipDeviceSynchronize();
}
void ExtentRatioCaller(const int *boundingBoxInfo , const int compCount , const int *areaRes , float *extent_ratio , hipStream_t stream)
{
dim3 threads(32,1);
dim3 grid((compCount + 32 -1)/32 , 1);
hipLaunchKernelGGL(( extentratio) , dim3(grid) , dim3(threads) , 0 , stream , boundingBoxInfo , compCount , areaRes , extent_ratio);
hipGetLastError();
if(stream == 0)
hipDeviceSynchronize();
}
void CircularityCaller(const int compCount , const int *areaRes , const float *perimeterRes , float *circ, hipStream_t stream)
{
dim3 threads(32,1);
dim3 grid((compCount + 32 - 1)/32 , 1);
hipLaunchKernelGGL(( circularity) , dim3(grid) , dim3(threads) , 0 , stream , compCount , areaRes , perimeterRes , circ);
hipGetLastError();
if(stream == 0)
hipDeviceSynchronize();
}
void BigFeaturesCaller(const int* boundingBoxInfo , const int compCount , const cv::gpu::PtrStep_<int> labeledMask , int* areaRes , float* perimeterRes , float* majorAxis , float* minorAxis , float* ecc, hipStream_t stream)
{
dim3 threads(32,1);
dim3 grid(compCount , 1);
hipLaunchKernelGGL(( big_features) , dim3(grid) , dim3(threads) , 0 , stream , boundingBoxInfo , compCount , labeledMask , areaRes , perimeterRes , majorAxis , minorAxis , ecc );
hipGetLastError();
if(stream == 0)
hipDeviceSynchronize();
}
void SmallFeaturesCaller(const int *boundingBoxInfo , const int compCount , const int *areaRes , const float *perimeterRes , float *extent_ratio , float* circ , hipStream_t stream)
{
dim3 threads(32,1);
dim3 grid((compCount + 32 -1)/32 , 1);
hipLaunchKernelGGL(( small_features) , dim3(grid) , dim3(threads) , 0 , stream , boundingBoxInfo , compCount , areaRes , perimeterRes , extent_ratio , circ);
hipGetLastError();
if(stream == 0)
hipDeviceSynchronize();
}
}
}
| 31de038ad2d9e44b2dfa5388bf696bf1ac0bfc9c.cu | /*** Written by Salil Deosthale 11/30/2012 ***/
#include "features.cuh"
#include <limits>
#include <iostream>
namespace nscale {
namespace gpu {
using namespace cv::gpu;
__global__ void area(const int *boundingBoxInfo, int compCount, const cv::gpu::PtrStep_<int> labeledMask, int *areaRes)
{
//Declare a shared array "thread_area[NumThreads]". This will hold the value of the area each thread walks through
__shared__ int thread_area[32];
//Zero out the thread_area array
thread_area[threadIdx.x] = 0;
//Pointer to a row of the image (TODO: revisit this -- should it live in shared memory?)
const int *labeledImgPtr;
//Label of this current component
int label = boundingBoxInfo[blockIdx.x];
int maxX = boundingBoxInfo[2 * compCount + blockIdx.x];
int maxY = boundingBoxInfo[4 * compCount + blockIdx.x];
for(int x = boundingBoxInfo[compCount + blockIdx.x] +threadIdx.x; x <= maxX ; x+=blockDim.x)
{
for(int y = boundingBoxInfo[3 * compCount + blockIdx.x] ; y <= maxY ; y++)
{
labeledImgPtr = labeledMask.ptr(y);
thread_area[threadIdx.x] += (labeledImgPtr[x] == label ? 1 : 0);
}
}
__syncthreads();
//Now, we do a parallel reduction using sequential addressing
unsigned int s;
for(s = blockDim.x /2 ; s > 0 ; s >>= 1)
{
if(threadIdx.x < s)
{
thread_area[threadIdx.x] += thread_area[threadIdx.x + s];
}
__syncthreads();
}
if (threadIdx.x == 0) areaRes[blockIdx.x] = thread_area[0];
}
__global__ void perimeter(const int *boundingBoxInfo, int compCount, const cv::gpu::PtrStep_<int> labeledMask, float *perimeterRes)
{
//Declare a shared array called 'lookup'. This will hold the lookup table. Each block will have this lookup table in its shared memory
__shared__ float lookup[16];
lookup[8] = 0.70710678118;
lookup[4] = 0.70710678118;
lookup[2] = 0.70710678118;
lookup[1] = 0.70710678118;
lookup[3] = 1.0;
lookup[6] = 1.0;
lookup[9] = 1.0;
lookup[12] = 1.0;
lookup[7] = 0.70710678118;
lookup[11] = 0.70710678118;
lookup[13] = 0.70710678118;
lookup[14] = 0.70710678118;
lookup[10] = 1.41421356237;
lookup[5] = 1.41421356237;
lookup[0] = 0.0;
lookup[15] = 0.0;
//------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
//Declare shared array for the perimeter that each thread encounters. Initialize it by zeroing it out.
__shared__ float thread_perimeter[32];
thread_perimeter[threadIdx.x] = 0.0;
//Declare a shared mask array for each block.
__shared__ int mask[32];
//------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
//Label of the current component and its bounding box extents (minX, minY, maxX, maxY)
int label = boundingBoxInfo[blockIdx.x];
int minX = boundingBoxInfo[compCount + blockIdx.x];
int minY = boundingBoxInfo[3 * compCount + blockIdx.x];
int maxX = boundingBoxInfo[2 * compCount + blockIdx.x];
int maxY = boundingBoxInfo[4 * compCount + blockIdx.x];
//------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
//Walk through the centre of the bounding box. From xmin to xmax-1
for(int x = minX + threadIdx.x ; x < maxX ; x+=blockDim.x)
{
for(int y = minY ; y < maxY ; y++)
{
mask[threadIdx.x] = 0;
mask[threadIdx.x] = (labeledMask.ptr(y)[x] == label); //(0,0)
mask[threadIdx.x] = (mask[threadIdx.x] << 1) | (labeledMask.ptr(y)[x+1] == label );//(1,0)
mask[threadIdx.x] = (mask[threadIdx.x] << 1) | (labeledMask.ptr(y+1)[x+1] == label );//(1,1)
mask[threadIdx.x] = (mask[threadIdx.x] << 1) | (labeledMask.ptr(y+1)[x] == label );//(0,1)
thread_perimeter[threadIdx.x] += lookup[mask[threadIdx.x]];
}
}
//Walk through the top and bottom edges of the bounding box.
for(int x = minX + threadIdx.x ; x < maxX ; x+=blockDim.x)
{
//Top row : Read->leftshift->read->leftshiftby2
mask[threadIdx.x] = 0;
mask[threadIdx.x] = (labeledMask.ptr(maxY)[x] == label);
mask[threadIdx.x] = (mask[threadIdx.x] << 1) | (labeledMask.ptr(maxY)[x+1] == label);
thread_perimeter[threadIdx.x] += lookup[mask[threadIdx.x]];
//Bottom row : leftshiftby2->read->leftshift->read
mask[threadIdx.x] = 0;
mask[threadIdx.x] = (labeledMask.ptr(minY)[x+1] == label);
mask[threadIdx.x] = (mask[threadIdx.x] << 1) | (labeledMask.ptr(minY)[x] == label);
thread_perimeter[threadIdx.x] += lookup[mask[threadIdx.x]];
}
//Walk through the left and right edges of the bounding box
for(int y = minY + threadIdx.x ; y < maxY ; y+=blockDim.x)
{
//Left edge : leftshift->read->leftshift->read->leftshift
mask[threadIdx.x] = 0;
mask[threadIdx.x] = (labeledMask.ptr(y)[minX] == label);
mask[threadIdx.x] = (mask[threadIdx.x] << 1) | (labeledMask.ptr(y+1)[minX] == label);
thread_perimeter[threadIdx.x] += lookup[mask[threadIdx.x]];
//Right edge : read->leftshiftby3->read
mask[threadIdx.x] = 0;
mask[threadIdx.x] = (labeledMask.ptr(y)[maxX] == label);
mask[threadIdx.x] = (mask[threadIdx.x] << 1) | (labeledMask.ptr(y+1)[maxX] == label);
thread_perimeter[threadIdx.x] += lookup[mask[threadIdx.x]];
}
//Corners
if(threadIdx.x == 0) //Bottom left corner (0,0)
{
mask[threadIdx.x] = 0;
mask[threadIdx.x] = (labeledMask.ptr(minY)[minX] == label);
thread_perimeter[threadIdx.x] += lookup[mask[threadIdx.x]];
}
if(threadIdx.x == 8) //Bottom right corner (1,0)
{
mask[threadIdx.x] = 0;
mask[threadIdx.x] = (labeledMask.ptr(minY)[maxX] == label);
thread_perimeter[threadIdx.x] += lookup[mask[threadIdx.x]];
}
if(threadIdx.x == 16) // Top right corner (1,1)
{
mask[threadIdx.x] = 0;
mask[threadIdx.x] = (labeledMask.ptr(maxY)[maxX] == label);
thread_perimeter[threadIdx.x] += lookup[mask[threadIdx.x]];
}
if(threadIdx.x == 24) // Top left corner (0,1)
{
mask[threadIdx.x] = 0;
mask[threadIdx.x] = (labeledMask.ptr(maxY)[minX] == label);
thread_perimeter[threadIdx.x] += lookup[mask[threadIdx.x]];
}
__syncthreads();
//Now, we do a parallel reduction using sequential addressing
unsigned int s;
for(s = blockDim.x /2 ; s > 0 ; s >>= 1)
{
if(threadIdx.x < s)
{
thread_perimeter[threadIdx.x] += thread_perimeter[threadIdx.x + s];
}
__syncthreads();
}
if (threadIdx.x == 0) perimeterRes[blockIdx.x] = thread_perimeter[0];
}
__global__ void ellipse(const int* boundingBoxInfo , const int compCount , const cv::gpu::PtrStep_ <int> labeledMask , int *areaRes , float *majorAxis , float* minorAxis , float* ecc)
{
//Create shared arrays for sx , sy , sxy , ssqx , ssqy
__shared__ float sx[32]; sx[threadIdx.x] = 0;
__shared__ float sy[32]; sy[threadIdx.x] = 0;
__shared__ float sxy[32]; sxy[threadIdx.x] = 0;
__shared__ float ssqx[32]; ssqx[threadIdx.x] = 0;
__shared__ float ssqy[32]; ssqy[threadIdx.x] = 0;
//------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
//fix the parameters according to the blockId
int label = boundingBoxInfo[blockIdx.x];
int minX = boundingBoxInfo[compCount + blockIdx.x];
int maxX = boundingBoxInfo[2 * compCount + blockIdx.x];
float midX = (float)(minX+maxX)/2.0;
int minY = boundingBoxInfo[3 * compCount + blockIdx.x];
int maxY = boundingBoxInfo[4 * compCount + blockIdx.x];
float midY = (float)(minY+maxY)/2.0;
//------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
//Walk through the labeled Image
for( int x = minX + threadIdx.x ; x <= maxX ; x+=blockDim.x)
{
float cx = (float)x - midX;
for( int y = minY ; y <=maxY ; y++)
{
float cy = (float)y - midY;
bool temp = (labeledMask.ptr(y))[x] == label;
sx[threadIdx.x] += ( temp ? cx : 0);
sy[threadIdx.x] += ( temp ? cy : 0);
sxy[threadIdx.x] += ( temp ? cx*cy : 0);
ssqx[threadIdx.x] += ( temp ? cx*cx : 0);
ssqy[threadIdx.x] += ( temp ? cy*cy : 0);
}
}
__syncthreads();
//------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
//Now do a parallel reduction to complete the summation
unsigned int s;
for(s = blockDim.x /2 ; s > 0 ; s >>= 1)
{
if(threadIdx.x < s)
{
sx[threadIdx.x] += sx[threadIdx.x + s];
sy[threadIdx.x] += sy[threadIdx.x + s];
sxy[threadIdx.x] += sxy[threadIdx.x + s];
ssqx[threadIdx.x] += ssqx[threadIdx.x + s];
ssqy[threadIdx.x] += ssqy[threadIdx.x + s];
}
__syncthreads();
}
//------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
//Now do the remaining calculations
if (threadIdx.x == 0)
{
float frac = 1.0 / 12.0;
float root = sqrtf(8.0);
float area = (float)areaRes[blockIdx.x];
float xbar = (float)sx[0] / area;
float ybar = (float)sy[0] / area;
float mxx = (float)ssqx[0]/area - xbar*xbar+ frac;
float myy = (float)ssqy[0]/area - ybar*ybar+ frac;
float mxy = (float)sxy[0]/area - xbar*ybar;
float delta = sqrtf((mxx-myy)*(mxx-myy) + 4.0 * mxy * mxy); //discriminant = sqrt(b*b-4*a*c)
majorAxis[blockIdx.x] = root*sqrtf(mxx+myy+delta);
minorAxis[blockIdx.x] = root*sqrtf(mxx+myy-delta);
ecc[blockIdx.x] = sqrtf(majorAxis[blockIdx.x] * majorAxis[blockIdx.x] - minorAxis[blockIdx.x] * minorAxis[blockIdx.x])/majorAxis[blockIdx.x];
}
}
__global__ void big_features(const int *boundingBoxInfo, int compCount, const cv::gpu::PtrStep_<int> labeledMask, int *areaRes, float* perimeterRes , float* majorAxis , float* minorAxis , float* ecc)
{
/*****************************************FIRST DECLARE THE SHARED ARRAYS FOR ALL THE FEATURES****************************************************/
__shared__ int thread_area[32]; //shared array for thread area
thread_area[threadIdx.x] = 0;
__shared__ float lookup[16]; //shared array lookup table for thread perimeter
lookup[8] = 0.70710678118;
lookup[4] = 0.70710678118;
lookup[2] = 0.70710678118;
lookup[1] = 0.70710678118;
lookup[3] = 1.0;
lookup[6] = 1.0;
lookup[9] = 1.0;
lookup[12] = 1.0;
lookup[7] = 0.70710678118;
lookup[11] = 0.70710678118;
lookup[13] = 0.70710678118;
lookup[14] = 0.70710678118;
lookup[10] = 1.41421356237;
lookup[5] = 1.41421356237;
lookup[0] = 0.0;
lookup[15] = 0.0;
__shared__ float thread_perimeter[32]; //shared array for thread perimeter
thread_perimeter[threadIdx.x] = 0.0;
__shared__ int mask[32]; //shared array mask for perimeter
__shared__ float sx[32]; sx[threadIdx.x] = 0; //shared arrays for ellipse calculations
__shared__ float sy[32]; sy[threadIdx.x] = 0;
__shared__ float sxy[32]; sxy[threadIdx.x] = 0;
__shared__ float ssqx[32]; ssqx[threadIdx.x] = 0;
__shared__ float ssqy[32]; ssqy[threadIdx.x] = 0;
/******************************************NOW DECLARE ALL THE DETAILS RELATED TO BOUNDING BOX**********************************************/
int label = boundingBoxInfo[blockIdx.x];
int minX = boundingBoxInfo[compCount + blockIdx.x];
int maxX = boundingBoxInfo[2 * compCount + blockIdx.x];
float midX = (float)(minX+maxX)/2.0;
int minY = boundingBoxInfo[3 * compCount + blockIdx.x];
int maxY = boundingBoxInfo[4 * compCount + blockIdx.x];
float midY = (float)(minY+maxY)/2.0;
/******************************************NOW WALK THROUGH THE IMAGE FOR ONE IMAGE AT A TIME**************************************************/
///////////////////////////////////////////////////////////////////////////AREA//////////////////////////////////////////////////////////////////
for(int x = minX +threadIdx.x; x <= maxX ; x+=blockDim.x)
{
for(int y = minY ; y <= maxY ; y++)
{
thread_area[threadIdx.x] += ((labeledMask.ptr(y))[x] == label ? 1 : 0);
}
}
__syncthreads();
/////////////////////////////////////////////////////////////////////////PERIMETER//////////////////////////////////////////////////////////////
//Walk through the centre of the bounding box. From xmin to xmax-1
for(int x = minX + threadIdx.x ; x < maxX ; x+=blockDim.x)
{
for(int y = minY ; y < maxY ; y++)
{
mask[threadIdx.x] = 0;
mask[threadIdx.x] = (labeledMask.ptr(y)[x] == label); //(0,0)
mask[threadIdx.x] = (mask[threadIdx.x] << 1) | (labeledMask.ptr(y)[x+1] == label );//(1,0)
mask[threadIdx.x] = (mask[threadIdx.x] << 1) | (labeledMask.ptr(y+1)[x+1] == label );//(1,1)
mask[threadIdx.x] = (mask[threadIdx.x] << 1) | (labeledMask.ptr(y+1)[x] == label );//(0,1)
thread_perimeter[threadIdx.x] += lookup[mask[threadIdx.x]];
}
}
//Walk through the top and bottom edges of the bounding box.
for(int x = minX + threadIdx.x ; x < maxX ; x+=blockDim.x)
{
//Top row : Read->leftshift->read->leftshiftby2
mask[threadIdx.x] = 0;
mask[threadIdx.x] = (labeledMask.ptr(maxY)[x] == label);
mask[threadIdx.x] = (mask[threadIdx.x] << 1) | (labeledMask.ptr(maxY)[x+1] == label);
thread_perimeter[threadIdx.x] += lookup[mask[threadIdx.x]];
//Bottom row : leftshiftby2->read->leftshift->read
mask[threadIdx.x] = 0;
mask[threadIdx.x] = (labeledMask.ptr(minY)[x+1] == label);
mask[threadIdx.x] = (mask[threadIdx.x] << 1) | (labeledMask.ptr(minY)[x] == label);
thread_perimeter[threadIdx.x] += lookup[mask[threadIdx.x]];
}
//Walk through the left and right edges of the bounding box
for(int y = minY + threadIdx.x ; y < maxY ; y+=blockDim.x)
{
//Left edge : leftshift->read->leftshift->read->leftshift
mask[threadIdx.x] = 0;
mask[threadIdx.x] = (labeledMask.ptr(y)[minX] == label);
mask[threadIdx.x] = (mask[threadIdx.x] << 1) | (labeledMask.ptr(y+1)[minX] == label);
thread_perimeter[threadIdx.x] += lookup[mask[threadIdx.x]];
//Right edge : read->leftshiftby3->read
mask[threadIdx.x] = 0;
mask[threadIdx.x] = (labeledMask.ptr(y)[maxX] == label);
mask[threadIdx.x] = (mask[threadIdx.x] << 1) | (labeledMask.ptr(y+1)[maxX] == label);
thread_perimeter[threadIdx.x] += lookup[mask[threadIdx.x]];
}
//Corners
if(threadIdx.x == 0) //Bottom left corner (0,0)
{
mask[threadIdx.x] = 0;
mask[threadIdx.x] = (labeledMask.ptr(minY)[minX] == label);
thread_perimeter[threadIdx.x] += lookup[mask[threadIdx.x]];
}
if(threadIdx.x == 8) //Bottom right corner (1,0)
{
mask[threadIdx.x] = 0;
mask[threadIdx.x] = (labeledMask.ptr(minY)[maxX] == label);
thread_perimeter[threadIdx.x] += lookup[mask[threadIdx.x]];
}
if(threadIdx.x == 16) // Top right corner (1,1)
{
mask[threadIdx.x] = 0;
mask[threadIdx.x] = (labeledMask.ptr(maxY)[maxX] == label);
thread_perimeter[threadIdx.x] += lookup[mask[threadIdx.x]];
}
if(threadIdx.x == 24) // Top left corner (0,1)
{
mask[threadIdx.x] = 0;
mask[threadIdx.x] = (labeledMask.ptr(maxY)[minX] == label);
thread_perimeter[threadIdx.x] += lookup[mask[threadIdx.x]];
}
__syncthreads();
/////////////////////////////////////////////////////////ELLIPSE/////////////////////////////////////////////////////////////////////////
//Walk through the labeled Image
for( int x = minX + threadIdx.x ; x <= maxX ; x+=blockDim.x)
{
float cx = (float)x - midX;
for( int y = minY ; y <=maxY ; y++)
{
float cy = (float)y - midY;
bool temp = (labeledMask.ptr(y))[x] == label;
sx[threadIdx.x] += ( temp ? cx : 0);
sy[threadIdx.x] += ( temp ? cy : 0);
sxy[threadIdx.x] += ( temp ? cx*cy : 0);
ssqx[threadIdx.x] += ( temp ? cx*cx : 0);
ssqy[threadIdx.x] += ( temp ? cy*cy : 0);
}
}
__syncthreads();
/************************************************PARALLEL REDUCTION OVER ALL THE SHARED ARRAYS********************************************************/
unsigned int s;
for(s = blockDim.x /2 ; s > 0 ; s >>= 1)
{
if(threadIdx.x < s)
{
//Area
thread_area[threadIdx.x] += thread_area[threadIdx.x + s];
//Perimeter
thread_perimeter[threadIdx.x] += thread_perimeter[threadIdx.x + s];
//Ellipse
sx[threadIdx.x] += sx[threadIdx.x + s];
sy[threadIdx.x] += sy[threadIdx.x + s];
sxy[threadIdx.x] += sxy[threadIdx.x + s];
ssqx[threadIdx.x] += ssqx[threadIdx.x + s];
ssqy[threadIdx.x] += ssqy[threadIdx.x + s];
}
__syncthreads();
}
/*********************************************************************CONSOLIDATE!!!!*******************************************************************/
if (threadIdx.x == 0)
{
//Area
areaRes[blockIdx.x] = thread_area[0];
//Perimeter
perimeterRes[blockIdx.x] = thread_perimeter[0];
//Ellipse
float frac = 1.0 / 12.0;
float root = sqrtf(8.0);
float area = (float)areaRes[blockIdx.x];
float xbar = (float)sx[0] / area;
float ybar = (float)sy[0] / area;
float mxx = (float)ssqx[0]/area - xbar*xbar+ frac;
float myy = (float)ssqy[0]/area - ybar*ybar+ frac;
float mxy = (float)sxy[0]/area - xbar*ybar;
float delta = sqrtf((mxx-myy)*(mxx-myy) + 4.0 * mxy * mxy); //discriminant = sqrt(b*b-4*a*c)
majorAxis[blockIdx.x] = root*sqrtf(mxx+myy+delta);
minorAxis[blockIdx.x] = root*sqrtf(mxx+myy-delta);
ecc[blockIdx.x] = sqrtf(majorAxis[blockIdx.x] * majorAxis[blockIdx.x] - minorAxis[blockIdx.x] * minorAxis[blockIdx.x])/majorAxis[blockIdx.x];
}
}
__global__ void extentratio(const int *boundingBoxInfo , const int compCount , const int *areaRes , float *extent_ratio)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if(tid < compCount)
{
int xmin = boundingBoxInfo[compCount + tid];
int xmax = boundingBoxInfo[2 * compCount + tid];
int ymin = boundingBoxInfo[3 * compCount + tid];
int ymax = boundingBoxInfo[4 * compCount + tid];
extent_ratio[tid] = (float)areaRes[tid] / (float)((xmax-xmin+1) * (ymax-ymin+1));
}
}
__global__ void circularity(const int compCount , const int *areaRes , const float *perimeterRes , float *circ)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if(tid < compCount)
{
circ[tid] = (4.0 * 3.14159265359 * (float)areaRes[tid]) / (perimeterRes[tid] * perimeterRes[tid]) ;
}
}
__global__ void small_features(const int *boundingBoxInfo , const int compCount , const int *areaRes , const float *perimeterRes , float *extent_ratio , float *circ)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if( tid < compCount)
{
int xmin = boundingBoxInfo[compCount + tid];
int xmax = boundingBoxInfo[2 * compCount + tid];
int ymin = boundingBoxInfo[3 * compCount + tid];
int ymax = boundingBoxInfo[4 * compCount + tid];
extent_ratio[tid] = (float)areaRes[tid] / (float)((xmax-xmin+1) * (ymax-ymin+1));
circ[tid] = (4.0 * 3.14159265359 * (float)areaRes[tid]) / (perimeterRes[tid] * perimeterRes[tid]);
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void AreaCaller(const int* boundingBoxInfo , const int compCount ,const cv::gpu::PtrStep_<int> labeledMask, int *areaRes, cudaStream_t stream)
{
dim3 threads(32,1);
dim3 grid(compCount, 1);
area<<<grid,threads,0,stream>>>(boundingBoxInfo, compCount, labeledMask,areaRes);
cudaGetLastError();
if(stream == 0)
cudaDeviceSynchronize();
}
void PerimeterCaller(const int* boundingBoxInfo , const int compCount ,const cv::gpu::PtrStep_<int> labeledMask , float *perimeterRes, cudaStream_t stream)
{
dim3 threads(32,1);
dim3 grid(compCount,1);
perimeter <<< grid, threads, 0 ,stream >>>(boundingBoxInfo, compCount , labeledMask, perimeterRes);
cudaGetLastError();
if(stream == 0)
cudaDeviceSynchronize();
}
void EllipseCaller(const int* boundingBoxInfo , const int compCount , const cv::gpu::PtrStep_ <int> labeledMask , int *areaRes , float *majorAxis , float *minorAxis , float *ecc, cudaStream_t stream)
{
dim3 threads(32,1);
dim3 grid(compCount , 1);
ellipse <<<grid , threads , 0 , stream >>>(boundingBoxInfo , compCount , labeledMask , areaRes , majorAxis , minorAxis , ecc);
cudaGetLastError();
if(stream == 0)
cudaDeviceSynchronize();
}
void ExtentRatioCaller(const int *boundingBoxInfo , const int compCount , const int *areaRes , float *extent_ratio , cudaStream_t stream)
{
dim3 threads(32,1);
dim3 grid((compCount + 32 -1)/32 , 1);
extentratio <<< grid , threads , 0 , stream >>>(boundingBoxInfo , compCount , areaRes , extent_ratio);
cudaGetLastError();
if(stream == 0)
cudaDeviceSynchronize();
}
void CircularityCaller(const int compCount , const int *areaRes , const float *perimeterRes , float *circ, cudaStream_t stream)
{
dim3 threads(32,1);
dim3 grid((compCount + 32 - 1)/32 , 1);
circularity <<< grid , threads , 0 , stream >>>(compCount , areaRes , perimeterRes , circ);
cudaGetLastError();
if(stream == 0)
cudaDeviceSynchronize();
}
void BigFeaturesCaller(const int* boundingBoxInfo , const int compCount , const cv::gpu::PtrStep_<int> labeledMask , int* areaRes , float* perimeterRes , float* majorAxis , float* minorAxis , float* ecc, cudaStream_t stream)
{
dim3 threads(32,1);
dim3 grid(compCount , 1);
big_features <<< grid , threads , 0 , stream >>> (boundingBoxInfo , compCount , labeledMask , areaRes , perimeterRes , majorAxis , minorAxis , ecc );
cudaGetLastError();
if(stream == 0)
cudaDeviceSynchronize();
}
void SmallFeaturesCaller(const int *boundingBoxInfo , const int compCount , const int *areaRes , const float *perimeterRes , float *extent_ratio , float* circ , cudaStream_t stream)
{
dim3 threads(32,1);
dim3 grid((compCount + 32 -1)/32 , 1);
small_features <<<grid , threads , 0 , stream >>> (boundingBoxInfo , compCount , areaRes , perimeterRes , extent_ratio , circ);
cudaGetLastError();
if(stream == 0)
cudaDeviceSynchronize();
}
}
}
|
1890f79f90b98d98896de959fe13a0125b5ef039.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// This uses a lot of code from Caffe (http://caffe.berkeleyvision.org/);
// sources are clearly marked. Below we reproduce the original license of
// the Caffe software.
/*
Copyright (c) 2014, The Regents of the University of California (Regents)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#undef _GLIBCXX_ATOMIC_BUILTINS
// (borrowed from Caffe: https://github.com/BVLC/caffe/blob/master/src/caffe/caffe_common.hpp)
// CUDA: grid stride looping
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
// CUDA: thread number configuration.
// Use 1024 threads per block, which requires cuda sm_2x or above,
// or fall back to attempt compatibility (best of luck to you).
#if __CUDA_ARCH__ >= 200
const int CUDA_NUM_THREADS = 1024;
#else
const int CUDA_NUM_THREADS = 512;
#endif
// CUDA: number of blocks for threads.
inline int GET_BLOCKS(const int N) {
return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
}
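// Illustrative launch pattern only (my_kernel is a hypothetical name): a kernel over N
// independent elements would typically be launched as
//   hipLaunchKernelGGL((my_kernel), dim3(GET_BLOCKS(N)), dim3(CUDA_NUM_THREADS), 0, 0, N, ...);
// with CUDA_KERNEL_LOOP(i, N) inside the kernel striding over any leftover work, exactly as the
// im2col/col2im kernels below do.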
// (borrowed from Caffe: https://github.com/BVLC/caffe/blob/master/src/caffe/util/im2col.cu)
// Kernels for fast unfold + copy
__global__ void im2col_kernel(const int n, const float* data_im,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int height_col, const int width_col,
float* data_col) {
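// data_col is laid out row-major as (channels * kernel_h * kernel_w) x (height_col * width_col);
// each thread fills the kernel_h x kernel_w patch entries for one output location of one input
// channel, stepping down that column.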
CUDA_KERNEL_LOOP(index, n) {
int w_out = index % width_col;
int h_index = index / width_col;
int h_out = h_index % height_col;
int channel_in = h_index / height_col;
int channel_out = channel_in * kernel_h * kernel_w;
int h_in = h_out * stride_h - pad_h;
int w_in = w_out * stride_w - pad_w;
float* data_col_ptr = data_col;
data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;
const float* data_im_ptr = data_im;
data_im_ptr += (channel_in * height + h_in) * width + w_in;
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
int h = h_in + i;
int w = w_in + j;
*data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?
data_im_ptr[i * width + j] : 0;
data_col_ptr += height_col * width_col;
}
}
}
}
void im2col(const float* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
float* data_col) {
// We are going to launch channels * height_col * width_col threads, each
// thread responsible for copying one kernel-sized patch of a single channel.
int height_col = (height + 2 * pad_h - kernel_h) / stride_h + 1;
int width_col = (width + 2 * pad_w - kernel_w) / stride_w + 1;
int num_kernels = channels * height_col * width_col;
hipLaunchKernelGGL(( im2col_kernel), dim3(GET_BLOCKS(num_kernels)),
dim3(CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, height, width, kernel_h, kernel_w, pad_h,
pad_w, stride_h, stride_w, height_col,
width_col, data_col);
}
__global__ void col2im_kernel(const int n, const float* data_col,
const int height, const int width, const int channels,
const int patch_h, const int patch_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int height_col, const int width_col,
float* data_im) {
CUDA_KERNEL_LOOP(index, n) {
float val = 0;
int w = index % width + pad_w;
int h = (index / width) % height + pad_h;
int c = index / (width * height);
// compute the start and end of the output
int w_col_start = (w < patch_w) ? 0 : (w - patch_w) / stride_w + 1;
int w_col_end = min(w / stride_w + 1, width_col);
int h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1;
int h_col_end = min(h / stride_h + 1, height_col);
/*
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
// the col location: [c * width * height + h_out, w_out]
int c_col = c * patch_h * patch_w + (h - h_col * stride_h) * ksize
+ (w - w_col * stride_w);
val += data_col[(c_col * height_col + h_col) * width_col + w_col];
}
}
*/
// equivalent implementation
int offset =
(c * patch_h * patch_w + h * patch_w + w) * height_col * width_col;
int coeff_h_col = (1 - stride_h * patch_w * height_col) * width_col;
int coeff_w_col = (1 - stride_w * height_col * width_col);
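// These coefficients come from expanding the index in the commented-out version above
// (ksize there corresponds to patch_w): (c_col * height_col + h_col) * width_col + w_col with
// c_col = c*patch_h*patch_w + (h - h_col*stride_h)*patch_w + (w - w_col*stride_w), rearranged
// into a constant offset plus per-h_col and per-w_col increments.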
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col];
}
}
data_im[index] = val;
}
}
void col2im(const float* data_col, const int channels,
const int height, const int width, const int patch_h, const int patch_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, float* data_im) {
int height_col = (height + 2 * pad_h - patch_h) / stride_h + 1;
int width_col = (width + 2 * pad_w - patch_w) / stride_w + 1;
int num_kernels = channels * height * width;
// To avoid involving atomic operations, we will launch one kernel per
// bottom dimension, and then in the kernel add up the top dimensions.
hipLaunchKernelGGL(( col2im_kernel), dim3(GET_BLOCKS(num_kernels)),
dim3(CUDA_NUM_THREADS), 0, 0,
num_kernels, data_col, height, width, channels, patch_h, patch_w,
pad_h, pad_w, stride_h, stride_w,
height_col, width_col, data_im);
}
// Theano op code
// Authors: Arjun Jain, Frederic Bastien, Jan Schluter
// Reference code: https://github.com/BVLC/caffe/blob/master/src/caffe/layers/conv_layer.cu
// and https://github.com/torch/cunn/blob/master/SpatialConvolutionMM.cu
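// Summary (derived from the code below): corrMM performs the GEMM-based correlation in one of
// three directions -- 0: forward pass, computing top from bottom and weight (im2col + gemm);
// 1: gradient w.r.t. weight, accumulated over the batch (im2col + gemm); 2: gradient w.r.t.
// bottom (gemm + col2im). dH/dW are the strides and padH/padW the zero-padding. On success it
// returns an alias of the array it wrote into (top, weight or bottom); on error it sets a
// Python exception and returns NULL.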
CudaNdarray* corrMM(CudaNdarray *const bottom,
CudaNdarray *const weight,
CudaNdarray *const top,
const int direction,
const int dH = 1,
const int dW = 1,
const int padH = 0,
const int padW = 0)
{
if (bottom->nd != 4)
{
PyErr_SetString(PyExc_ValueError, "GpuCorrMM requires bottom of 4D");
return NULL;
}
if (!CudaNdarray_is_c_contiguous(bottom))
{
PyErr_Format(PyExc_ValueError,
"GpuCorrMM requires bottom to be C-contiguous, "
"but strides are: %d %d %d %d\n",
CudaNdarray_HOST_STRIDES(bottom)[0],
CudaNdarray_HOST_STRIDES(bottom)[1],
CudaNdarray_HOST_STRIDES(bottom)[2],
CudaNdarray_HOST_STRIDES(bottom)[3]);
return NULL;
}
if (weight->nd != 4)
{
PyErr_SetString(PyExc_ValueError, "GpuCorrMM requires weight of 4D");
return NULL;
}
if (!CudaNdarray_is_c_contiguous(weight))
{
PyErr_Format(PyExc_ValueError,
"GpuCorrMM requires weight to be C-contiguous, "
"but strides are: %d %d %d %d\n",
CudaNdarray_HOST_STRIDES(weight)[0],
CudaNdarray_HOST_STRIDES(weight)[1],
CudaNdarray_HOST_STRIDES(weight)[2],
CudaNdarray_HOST_STRIDES(weight)[3]);
return NULL;
}
if (top->nd != 4)
{
PyErr_SetString(PyExc_ValueError, "GpuCorrMM requires top of 4D");
return NULL;
}
if (!CudaNdarray_is_c_contiguous(top))
{
PyErr_Format(PyExc_ValueError,
"GpuCorrMM requires top to be C-contiguous, "
"but strides are: %d %d %d %d\n",
CudaNdarray_HOST_STRIDES(top)[0],
CudaNdarray_HOST_STRIDES(top)[1],
CudaNdarray_HOST_STRIDES(top)[2],
CudaNdarray_HOST_STRIDES(top)[3]);
return NULL;
}
// Extract some shape information for later and check shape consistency
// bottom: (batchSize, nChannels, bottomHeight, bottomWidth)
const int batchSize = CudaNdarray_HOST_DIMS(bottom)[0];
const int nChannels = CudaNdarray_HOST_DIMS(bottom)[1];
const int bottomHeight = CudaNdarray_HOST_DIMS(bottom)[2];
const int bottomWidth = CudaNdarray_HOST_DIMS(bottom)[3];
// weights: (nFilters, nChannels, rows, columns)
const int nFilters = CudaNdarray_HOST_DIMS(weight)[0];
const int kH = CudaNdarray_HOST_DIMS(weight)[2];
const int kW = CudaNdarray_HOST_DIMS(weight)[3];
if (nChannels != CudaNdarray_HOST_DIMS(weight)[1]) {
PyErr_SetString(PyExc_ValueError,
"GpuCorrMM images and kernel must have the same stack size\n");
return NULL;
}
// top: (batchSize, nFilters, topHeight, topWidth)
const int topHeight = (bottomHeight + 2*padH - kH) / dH + 1;
const int topWidth = (bottomWidth + 2*padW - kW) / dW + 1;
if (batchSize != CudaNdarray_HOST_DIMS(top)[0] ||
nFilters != CudaNdarray_HOST_DIMS(top)[1] ||
topHeight != CudaNdarray_HOST_DIMS(top)[2] ||
topWidth != CudaNdarray_HOST_DIMS(top)[3]) {
PyErr_Format(PyExc_ValueError,
"GpuCorrMM shape inconsistency:\n"
" bottom shape: %d %d %d %d\n"
" weight shape: %d %d %d %d\n"
" top shape: %d %d %d %d (expected %d %d %d %d)\n",
batchSize, nChannels, bottomHeight, bottomWidth,
nFilters, nChannels, kH, kW,
CudaNdarray_HOST_DIMS(top)[0], CudaNdarray_HOST_DIMS(top)[1],
CudaNdarray_HOST_DIMS(top)[2], CudaNdarray_HOST_DIMS(top)[3],
batchSize, nFilters, topHeight, topWidth);
return NULL;
}
// Create temporary columns
int col_dim[2];
col_dim[0] = nChannels * kW * kH;
col_dim[1] = topHeight * topWidth;
CudaNdarray* col = (CudaNdarray*)CudaNdarray_NewDims(2, col_dim);
if (NULL == col)
{
PyErr_Format(PyExc_RuntimeError,
"GpuCorrMM failed to allocate working memory of %d x %d\n",
col_dim[0], col_dim[1]);
return NULL;
}
// Define some useful variables
const int bottom_stride = CudaNdarray_HOST_STRIDES(bottom)[0];
const int top_stride = CudaNdarray_HOST_STRIDES(top)[0];
const int K_ = col_dim[0];
const int N_ = col_dim[1];
const int M_ = nFilters;
const float one = 1.0f;
const float zero = 0.0f;
CudaNdarray *output;
if (direction == 0) { // forward pass
output = top;
// valid correlation: im2col, then gemm
// Iterate over batch
for (int n = 0; n < batchSize; n++) {
// First, im2col
im2col(bottom->devdata + n * bottom_stride, nChannels, bottomHeight,
bottomWidth, kH, kW, padH, padW, dH, dW, col->devdata);
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
PyErr_Format(PyExc_RuntimeError,
"GpuCorrMM encountered a CUDA error in im2col: %s\n"
"This could be a known bug in CUDA, please see the "
"GpuCorrMM() documentation.\n",
hipGetErrorString(err));
Py_DECREF(col);
return NULL;
}
// Second, gemm
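// Note: hipBLAS/cuBLAS expect column-major data while CudaNdarray is row-major (C-contiguous).
// Passing col as A and weight as B with the dimensions below therefore computes, in row-major
// terms, top[n] (nFilters x N_) = weight (nFilters x K_) * col (K_ x N_).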
hipblasStatus_t status = hipblasSgemm(handle,
HIPBLAS_OP_N, HIPBLAS_OP_N,
N_, M_, K_,
&one,
col->devdata, N_,
weight->devdata, K_,
&zero,
top->devdata + n * top_stride, N_);
if (status != HIPBLAS_STATUS_SUCCESS) {
PyErr_Format(PyExc_RuntimeError,
"GpuCorrMM encountered a CUBLAS error: %s\n"
"This could be a known bug in CUDA, please see the "
"GpuCorrMM() documentation.\n",
cublasGetErrorString(status));
Py_DECREF(col);
return NULL;
}
}
/*
// Original caffe code for comparison
// https://github.com/BVLC/caffe/blob/master/src/caffe/layers/conv_layer.cu
// Note that this is for grouped convolution; we can ignore groups here,
// but the group-related offsets help explain what M_, N_ and K_ are
int weight_offset = M_ * K_;
int col_offset = K_ * N_;
int top_offset = M_ * N_;
for (int n = 0; n < num_; ++n) {
// First, im2col
im2col_gpu(bottom_data + bottom[i]->offset(n), channels_, height_,
width_, kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_,
col_data);
// Second, innerproduct with groups
for (int g = 0; g < group_; ++g) {
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, N_, K_,
(Dtype)1., weight + weight_offset * g, col_data + col_offset * g,
(Dtype)0., top_data + (*top)[i]->offset(n) + top_offset * g);
== (see https://github.com/BVLC/caffe/blob/master/src/caffe/util/math_functions.cu#L16)
hipblasSgemm(HIPBLAS_OP_N, HIPBLAS_OP_N,
N_, M_, K_,
1.,
col_data + col_offset * g, N_,
weight + weight_offset * g, K_,
0.,
top_data + (*top)[i]->offset(n) + top_offset * g, N_);
}
}
*/
}
else if (direction == 1) { // backprop wrt. weights
output = weight;
// valid convolution: im2col, then gemm
// Iterate over batch
for (int n = 0; n < batchSize; n++) {
// First, im2col
im2col(bottom->devdata + n * bottom_stride, nChannels, bottomHeight,
bottomWidth, kH, kW, padH, padW, dH, dW, col->devdata);
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
PyErr_Format(PyExc_RuntimeError,
"GpuCorrMM encountered a CUDA error in im2col: %s\n"
"This could be a known bug in CUDA, please see the "
"GpuCorrMM() documentation.\n",
hipGetErrorString(err));
Py_DECREF(col);
return NULL;
}
// Second, gemm
// Note that we accumulate into weight. We do so by setting beta = 0
// for the first iteration and beta = 1 for subsequent ones. (This
// is faster than setting weight to all zeros before the loop.)
hipblasStatus_t status = hipblasSgemm(handle,
HIPBLAS_OP_T, HIPBLAS_OP_N,
K_, M_, N_,
&one,
col->devdata, N_,
top->devdata + n * top_stride, N_,
(n == 0) ? &zero : &one,
weight->devdata, K_);
if (status != HIPBLAS_STATUS_SUCCESS) {
PyErr_Format(PyExc_RuntimeError,
"GpuCorrMM encountered a CUBLAS error: %s\n"
"This could be a known bug in CUDA, please see the "
"GpuCorrMM() documentation.\n",
cublasGetErrorString(status));
Py_DECREF(col);
return NULL;
}
}
/*
// Original caffe code for comparison
// https://github.com/BVLC/caffe/blob/master/src/caffe/layers/conv_layer.cu
// Note that this is for grouped convolution; we can ignore groups
for (int n = 0; n < num_; ++n) {
// Since we saved memory in the forward pass by not storing all col
// data, we will need to recompute them.
im2col_gpu(bottom_data + (*bottom)[i]->offset(n), channels_, height_,
width_, kernel_h_, kernel_w_, pad_h_, pad_w_,
stride_h_, stride_w_, col_data);
// gradient w.r.t. weight. Note that we will accumulate diffs.
for (int g = 0; g < group_; ++g) {
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, M_, K_, N_,
(Dtype)1., top_diff + top[i]->offset(n) + top_offset * g,
col_data + col_offset * g, (Dtype)1.,
weight_diff + weight_offset * g);
== (see https://github.com/BVLC/caffe/blob/master/src/caffe/util/math_functions.cu#L16)
hipblasSgemm(HIPBLAS_OP_T, HIPBLAS_OP_N, K_, M_, N_,
1.0,
col_data + col_offset * g, N_,
top_diff + top[i]->offset(n) + top_offset * g, N_,
1.0,
weight_diff + weight_offset * g, K_);
}
}
*/
}
else if (direction == 2) { // backprop wrt. inputs
output = bottom;
// full convolution: gemm, then col2im
// Iterate over batch
for (int n = 0; n < batchSize; n++) {
// gemm into columns
hipblasStatus_t status = hipblasSgemm(handle,
HIPBLAS_OP_N, HIPBLAS_OP_T,
N_, K_, M_,
&one,
top->devdata + n * top_stride, N_,
weight->devdata, K_,
&zero,
col->devdata, N_);
if (status != HIPBLAS_STATUS_SUCCESS) {
PyErr_Format(PyExc_RuntimeError,
"GpuCorrMM encountered a CUBLAS error: %s\n"
"This could be a known bug in CUDA, please see the "
"GpuCorrMM() documentation.\n",
cublasGetErrorString(status));
Py_DECREF(col);
return NULL;
}
// col2im back to the data
col2im(col->devdata, nChannels, bottomHeight, bottomWidth,
kH, kW, padH, padW, dH, dW, bottom->devdata + n * bottom_stride);
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
PyErr_Format(PyExc_RuntimeError,
"GpuCorrMM encountered a CUDA error in col2im: %s\n"
"This could be a known bug in CUDA, please see the "
"GpuCorrMM() documentation.\n",
hipGetErrorString(err));
Py_DECREF(col);
return NULL;
}
}
/*
// Original caffe code for comparison
// https://github.com/BVLC/caffe/blob/master/src/caffe/layers/conv_layer.cu
for (int n = 0; n < num_; ++n) {
// gradient w.r.t. bottom data, if necessary
if (propagate_down[i]) {
for (int g = 0; g < group_; ++g) {
caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans, K_, N_, M_,
(Dtype)1., weight + weight_offset * g,
top_diff + top[i]->offset(n) + top_offset * g,
(Dtype)0., col_diff + col_offset * g);
== (see https://github.com/BVLC/caffe/blob/master/src/caffe/util/math_functions.cu#L16)
hipblasSgemm(HIPBLAS_OP_N, HIPBLAS_OP_T, N_, K_, M_,
1.,
top_diff + top[i]->offset(n) + top_offset * g, N_,
weight + weight_offset * g, K_,
0.,
col_diff + col_offset * g, N_);
}
// col2im back to the data
col2im_gpu(col_diff, channels_, height_, width_,
kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_,
bottom_diff + (*bottom)[i]->offset(n));
}
}
*/
}
// Free temporary columns
Py_DECREF(col);
// Note that we don't change the refcount of the output matrix here. Output
// (re)allocation and refcounting is done in BaseGpuCorrMM.c_code_helper();
// in here output is just aliased to one of bottom, weights, or top.
return output;
}
| 1890f79f90b98d98896de959fe13a0125b5ef039.cu | // This uses a lot of code from Caffe (http://caffe.berkeleyvision.org/);
// sources are clearly marked. Below we reproduce the original license of
// the Caffe software.
/*
Copyright (c) 2014, The Regents of the University of California (Regents)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#undef _GLIBCXX_ATOMIC_BUILTINS
// (borrowed from Caffe: https://github.com/BVLC/caffe/blob/master/src/caffe/caffe_common.hpp)
// CUDA: grid stride looping
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
// CUDA: thread number configuration.
// Use 1024 threads per block, which requires cuda sm_2x or above,
// or fall back to attempt compatibility (best of luck to you).
#if __CUDA_ARCH__ >= 200
const int CUDA_NUM_THREADS = 1024;
#else
const int CUDA_NUM_THREADS = 512;
#endif
// CUDA: number of blocks for threads.
inline int GET_BLOCKS(const int N) {
return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
}
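// Typical usage, e.g. some_kernel<<<GET_BLOCKS(n), CUDA_NUM_THREADS>>>(n, ...);
// inside the kernel, CUDA_KERNEL_LOOP(i, n) then iterates with a grid stride so
// all n elements are covered even when the grid is smaller than n.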
// (borrowed from Caffe: https://github.com/BVLC/caffe/blob/master/src/caffe/util/im2col.cu)
// Kernels for fast unfold + copy
__global__ void im2col_kernel(const int n, const float* data_im,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int height_col, const int width_col,
float* data_col) {
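  // One thread per (channel, output row, output column): each thread copies its
  // kernel_h x kernel_w input patch into data_col, writing zeros for positions
  // that fall inside the padding region.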
CUDA_KERNEL_LOOP(index, n) {
int w_out = index % width_col;
int h_index = index / width_col;
int h_out = h_index % height_col;
int channel_in = h_index / height_col;
int channel_out = channel_in * kernel_h * kernel_w;
int h_in = h_out * stride_h - pad_h;
int w_in = w_out * stride_w - pad_w;
float* data_col_ptr = data_col;
data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;
const float* data_im_ptr = data_im;
data_im_ptr += (channel_in * height + h_in) * width + w_in;
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
int h = h_in + i;
int w = w_in + j;
*data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?
data_im_ptr[i * width + j] : 0;
data_col_ptr += height_col * width_col;
}
}
}
}
void im2col(const float* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
float* data_col) {
// We are going to launch channels * height_col * width_col kernels, each
// kernel responsible for copying a single-channel grid.
int height_col = (height + 2 * pad_h - kernel_h) / stride_h + 1;
int width_col = (width + 2 * pad_w - kernel_w) / stride_w + 1;
int num_kernels = channels * height_col * width_col;
im2col_kernel<<<GET_BLOCKS(num_kernels),
CUDA_NUM_THREADS>>>(
num_kernels, data_im, height, width, kernel_h, kernel_w, pad_h,
pad_w, stride_h, stride_w, height_col,
width_col, data_col);
}
__global__ void col2im_kernel(const int n, const float* data_col,
const int height, const int width, const int channels,
const int patch_h, const int patch_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int height_col, const int width_col,
float* data_im) {
CUDA_KERNEL_LOOP(index, n) {
float val = 0;
int w = index % width + pad_w;
int h = (index / width) % height + pad_h;
int c = index / (width * height);
// compute the start and end of the output
int w_col_start = (w < patch_w) ? 0 : (w - patch_w) / stride_w + 1;
int w_col_end = min(w / stride_w + 1, width_col);
int h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1;
int h_col_end = min(h / stride_h + 1, height_col);
/*
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
// the col location: [c * width * height + h_out, w_out]
int c_col = c * patch_h * patch_w + (h - h_col * stride_h) * ksize
+ (w - w_col * stride_w);
val += data_col[(c_col * height_col + h_col) * width_col + w_col];
}
}
*/
// equivalent implementation
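    // offset, coeff_h_col and coeff_w_col express the data_col index used in the
    // commented-out loop above as a base term plus terms linear in h_col and
    // w_col, so the nested loops below read the same elements.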
int offset =
(c * patch_h * patch_w + h * patch_w + w) * height_col * width_col;
int coeff_h_col = (1 - stride_h * patch_w * height_col) * width_col;
int coeff_w_col = (1 - stride_w * height_col * width_col);
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col];
}
}
data_im[index] = val;
}
}
void col2im(const float* data_col, const int channels,
const int height, const int width, const int patch_h, const int patch_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, float* data_im) {
int height_col = (height + 2 * pad_h - patch_h) / stride_h + 1;
int width_col = (width + 2 * pad_w - patch_w) / stride_w + 1;
int num_kernels = channels * height * width;
  // To avoid atomic operations, we launch one thread per bottom (image) element;
  // each thread then sums up all overlapping top (column) contributions.
col2im_kernel<<<GET_BLOCKS(num_kernels),
CUDA_NUM_THREADS>>>(
num_kernels, data_col, height, width, channels, patch_h, patch_w,
pad_h, pad_w, stride_h, stride_w,
height_col, width_col, data_im);
}
// Theano op code
// Authors: Arjun Jain, Frederic Bastien, Jan Schluter
// Reference code: https://github.com/BVLC/caffe/blob/master/src/caffe/layers/conv_layer.cu
// and https://github.com/torch/cunn/blob/master/SpatialConvolutionMM.cu
CudaNdarray* corrMM(CudaNdarray *const bottom,
CudaNdarray *const weight,
CudaNdarray *const top,
const int direction,
const int dH = 1,
const int dW = 1,
const int padH = 0,
const int padW = 0)
{
if (bottom->nd != 4)
{
PyErr_SetString(PyExc_ValueError, "GpuCorrMM requires bottom of 4D");
return NULL;
}
if (!CudaNdarray_is_c_contiguous(bottom))
{
PyErr_Format(PyExc_ValueError,
"GpuCorrMM requires bottom to be C-contiguous, "
"but strides are: %d %d %d %d\n",
CudaNdarray_HOST_STRIDES(bottom)[0],
CudaNdarray_HOST_STRIDES(bottom)[1],
CudaNdarray_HOST_STRIDES(bottom)[2],
CudaNdarray_HOST_STRIDES(bottom)[3]);
return NULL;
}
if (weight->nd != 4)
{
PyErr_SetString(PyExc_ValueError, "GpuCorrMM requires weight of 4D");
return NULL;
}
if (!CudaNdarray_is_c_contiguous(weight))
{
PyErr_Format(PyExc_ValueError,
"GpuCorrMM requires weight to be C-contiguous, "
"but strides are: %d %d %d %d\n",
CudaNdarray_HOST_STRIDES(weight)[0],
CudaNdarray_HOST_STRIDES(weight)[1],
CudaNdarray_HOST_STRIDES(weight)[2],
CudaNdarray_HOST_STRIDES(weight)[3]);
return NULL;
}
if (top->nd != 4)
{
PyErr_SetString(PyExc_ValueError, "GpuCorrMM requires top of 4D");
return NULL;
}
if (!CudaNdarray_is_c_contiguous(top))
{
PyErr_Format(PyExc_ValueError,
"GpuCorrMM requires top to be C-contiguous, "
"but strides are: %d %d %d %d\n",
CudaNdarray_HOST_STRIDES(top)[0],
CudaNdarray_HOST_STRIDES(top)[1],
CudaNdarray_HOST_STRIDES(top)[2],
CudaNdarray_HOST_STRIDES(top)[3]);
return NULL;
}
// Extract some shape information for later and check shape consistency
// bottom: (batchSize, nChannels, bottomHeight, bottomWidth)
const int batchSize = CudaNdarray_HOST_DIMS(bottom)[0];
const int nChannels = CudaNdarray_HOST_DIMS(bottom)[1];
const int bottomHeight = CudaNdarray_HOST_DIMS(bottom)[2];
const int bottomWidth = CudaNdarray_HOST_DIMS(bottom)[3];
// weights: (nFilters, nChannels, rows, columns)
const int nFilters = CudaNdarray_HOST_DIMS(weight)[0];
const int kH = CudaNdarray_HOST_DIMS(weight)[2];
const int kW = CudaNdarray_HOST_DIMS(weight)[3];
if (nChannels != CudaNdarray_HOST_DIMS(weight)[1]) {
PyErr_SetString(PyExc_ValueError,
"GpuCorrMM images and kernel must have the same stack size\n");
return NULL;
}
// top: (batchSize, nFilters, topHeight, topWidth)
const int topHeight = (bottomHeight + 2*padH - kH) / dH + 1;
const int topWidth = (bottomWidth + 2*padW - kW) / dW + 1;
if (batchSize != CudaNdarray_HOST_DIMS(top)[0] ||
nFilters != CudaNdarray_HOST_DIMS(top)[1] ||
topHeight != CudaNdarray_HOST_DIMS(top)[2] ||
topWidth != CudaNdarray_HOST_DIMS(top)[3]) {
PyErr_Format(PyExc_ValueError,
"GpuCorrMM shape inconsistency:\n"
" bottom shape: %d %d %d %d\n"
" weight shape: %d %d %d %d\n"
" top shape: %d %d %d %d (expected %d %d %d %d)\n",
batchSize, nChannels, bottomHeight, bottomWidth,
nFilters, nChannels, kH, kW,
CudaNdarray_HOST_DIMS(top)[0], CudaNdarray_HOST_DIMS(top)[1],
CudaNdarray_HOST_DIMS(top)[2], CudaNdarray_HOST_DIMS(top)[3],
batchSize, nFilters, topHeight, topWidth);
return NULL;
}
// Create temporary columns
int col_dim[2];
col_dim[0] = nChannels * kW * kH;
col_dim[1] = topHeight * topWidth;
CudaNdarray* col = (CudaNdarray*)CudaNdarray_NewDims(2, col_dim);
if (NULL == col)
{
PyErr_Format(PyExc_RuntimeError,
"GpuCorrMM failed to allocate working memory of %d x %d\n",
col_dim[0], col_dim[1]);
return NULL;
}
// Define some useful variables
const int bottom_stride = CudaNdarray_HOST_STRIDES(bottom)[0];
const int top_stride = CudaNdarray_HOST_STRIDES(top)[0];
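    // GEMM dimensions for the im2col formulation: K_ = length of one unrolled
    // patch (nChannels * kH * kW), N_ = number of output pixels
    // (topHeight * topWidth), M_ = number of filters.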
const int K_ = col_dim[0];
const int N_ = col_dim[1];
const int M_ = nFilters;
const float one = 1.0f;
const float zero = 0.0f;
CudaNdarray *output;
if (direction == 0) { // forward pass
output = top;
// valid correlation: im2col, then gemm
// Iterate over batch
for (int n = 0; n < batchSize; n++) {
// First, im2col
im2col(bottom->devdata + n * bottom_stride, nChannels, bottomHeight,
bottomWidth, kH, kW, padH, padW, dH, dW, col->devdata);
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
PyErr_Format(PyExc_RuntimeError,
"GpuCorrMM encountered a CUDA error in im2col: %s\n"
"This could be a known bug in CUDA, please see the "
"GpuCorrMM() documentation.\n",
cudaGetErrorString(err));
Py_DECREF(col);
return NULL;
}
// Second, gemm
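      // cuBLAS is column-major, so with these row-major buffers the call computes
      // top(M_ x N_) = weight(M_ x K_) * col(K_ x N_) by evaluating the
      // transposed product top^T = col^T * weight^T.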
cublasStatus_t status = cublasSgemm(handle,
CUBLAS_OP_N, CUBLAS_OP_N,
N_, M_, K_,
&one,
col->devdata, N_,
weight->devdata, K_,
&zero,
top->devdata + n * top_stride, N_);
if (status != CUBLAS_STATUS_SUCCESS) {
PyErr_Format(PyExc_RuntimeError,
"GpuCorrMM encountered a CUBLAS error: %s\n"
"This could be a known bug in CUDA, please see the "
"GpuCorrMM() documentation.\n",
cublasGetErrorString(status));
Py_DECREF(col);
return NULL;
}
}
/*
// Original caffe code for comparison
// https://github.com/BVLC/caffe/blob/master/src/caffe/layers/conv_layer.cu
// Note that this is for grouped convolution; we can ignore groups here,
// but the group-related offsets help explain what M_, N_ and K_ are
int weight_offset = M_ * K_;
int col_offset = K_ * N_;
int top_offset = M_ * N_;
for (int n = 0; n < num_; ++n) {
// First, im2col
im2col_gpu(bottom_data + bottom[i]->offset(n), channels_, height_,
width_, kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_,
col_data);
// Second, innerproduct with groups
for (int g = 0; g < group_; ++g) {
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, N_, K_,
(Dtype)1., weight + weight_offset * g, col_data + col_offset * g,
(Dtype)0., top_data + (*top)[i]->offset(n) + top_offset * g);
== (see https://github.com/BVLC/caffe/blob/master/src/caffe/util/math_functions.cu#L16)
cublasSgemm(CUBLAS_OP_N, CUBLAS_OP_N,
N_, M_, K_,
1.,
col_data + col_offset * g, N_,
weight + weight_offset * g, K_,
0.,
top_data + (*top)[i]->offset(n) + top_offset * g, N_);
}
}
*/
}
else if (direction == 1) { // backprop wrt. weights
output = weight;
// valid convolution: im2col, then gemm
// Iterate over batch
for (int n = 0; n < batchSize; n++) {
// First, im2col
im2col(bottom->devdata + n * bottom_stride, nChannels, bottomHeight,
bottomWidth, kH, kW, padH, padW, dH, dW, col->devdata);
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
PyErr_Format(PyExc_RuntimeError,
"GpuCorrMM encountered a CUDA error in im2col: %s\n"
"This could be a known bug in CUDA, please see the "
"GpuCorrMM() documentation.\n",
cudaGetErrorString(err));
Py_DECREF(col);
return NULL;
}
// Second, gemm
// Note that we accumulate into weight. We do so by setting beta = 0
// for the first iteration and beta = 1 for subsequent ones. (This
// is faster than setting weight to all zeros before the loop.)
cublasStatus_t status = cublasSgemm(handle,
CUBLAS_OP_T, CUBLAS_OP_N,
K_, M_, N_,
&one,
col->devdata, N_,
top->devdata + n * top_stride, N_,
(n == 0) ? &zero : &one,
weight->devdata, K_);
if (status != CUBLAS_STATUS_SUCCESS) {
PyErr_Format(PyExc_RuntimeError,
"GpuCorrMM encountered a CUBLAS error: %s\n"
"This could be a known bug in CUDA, please see the "
"GpuCorrMM() documentation.\n",
cublasGetErrorString(status));
Py_DECREF(col);
return NULL;
}
}
/*
// Original caffe code for comparison
// https://github.com/BVLC/caffe/blob/master/src/caffe/layers/conv_layer.cu
// Note that this is for grouped convolution; we can ignore groups
for (int n = 0; n < num_; ++n) {
// Since we saved memory in the forward pass by not storing all col
// data, we will need to recompute them.
im2col_gpu(bottom_data + (*bottom)[i]->offset(n), channels_, height_,
width_, kernel_h_, kernel_w_, pad_h_, pad_w_,
stride_h_, stride_w_, col_data);
// gradient w.r.t. weight. Note that we will accumulate diffs.
for (int g = 0; g < group_; ++g) {
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, M_, K_, N_,
(Dtype)1., top_diff + top[i]->offset(n) + top_offset * g,
col_data + col_offset * g, (Dtype)1.,
weight_diff + weight_offset * g);
== (see https://github.com/BVLC/caffe/blob/master/src/caffe/util/math_functions.cu#L16)
cublasSgemm(CUBLAS_OP_T, CUBLAS_OP_N, K_, M_, N_,
1.0,
col_data + col_offset * g, N_,
top_diff + top[i]->offset(n) + top_offset * g, N_,
1.0,
weight_diff + weight_offset * g, K_);
}
}
*/
}
else if (direction == 2) { // backprop wrt. inputs
output = bottom;
// full convolution: gemm, then col2im
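        // The gradient w.r.t. the inputs is computed per batch item as
        // col(K_ x N_) = weight^T(K_ x M_) * top(M_ x N_) (here `top` holds the
        // output gradient), then col2im scatters the columns back to image
        // layout, summing overlapping patch contributions.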
// Iterate over batch
for (int n = 0; n < batchSize; n++) {
// gemm into columns
cublasStatus_t status = cublasSgemm(handle,
CUBLAS_OP_N, CUBLAS_OP_T,
N_, K_, M_,
&one,
top->devdata + n * top_stride, N_,
weight->devdata, K_,
&zero,
col->devdata, N_);
if (status != CUBLAS_STATUS_SUCCESS) {
PyErr_Format(PyExc_RuntimeError,
"GpuCorrMM encountered a CUBLAS error: %s\n"
"This could be a known bug in CUDA, please see the "
"GpuCorrMM() documentation.\n",
cublasGetErrorString(status));
Py_DECREF(col);
return NULL;
}
// col2im back to the data
col2im(col->devdata, nChannels, bottomHeight, bottomWidth,
kH, kW, padH, padW, dH, dW, bottom->devdata + n * bottom_stride);
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
PyErr_Format(PyExc_RuntimeError,
"GpuCorrMM encountered a CUDA error in col2im: %s\n"
"This could be a known bug in CUDA, please see the "
"GpuCorrMM() documentation.\n",
cudaGetErrorString(err));
Py_DECREF(col);
return NULL;
}
}
/*
// Original caffe code for comparison
// https://github.com/BVLC/caffe/blob/master/src/caffe/layers/conv_layer.cu
for (int n = 0; n < num_; ++n) {
// gradient w.r.t. bottom data, if necessary
if (propagate_down[i]) {
for (int g = 0; g < group_; ++g) {
caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans, K_, N_, M_,
(Dtype)1., weight + weight_offset * g,
top_diff + top[i]->offset(n) + top_offset * g,
(Dtype)0., col_diff + col_offset * g);
== (see https://github.com/BVLC/caffe/blob/master/src/caffe/util/math_functions.cu#L16)
cublasSgemm(CUBLAS_OP_N, CUBLAS_OP_T, N_, K_, M_,
1.,
top_diff + top[i]->offset(n) + top_offset * g, N_,
weight + weight_offset * g, K_,
0.,
col_diff + col_offset * g, N_);
}
// col2im back to the data
col2im_gpu(col_diff, channels_, height_, width_,
kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_,
bottom_diff + (*bottom)[i]->offset(n));
}
}
*/
}
// Free temporary columns
Py_DECREF(col);
// Note that we don't change the refcount of the output matrix here. Output
// (re)allocation and refcounting is done in BaseGpuCorrMM.c_code_helper();
// in here output is just aliased to one of bottom, weights, or top.
return output;
}
|
0b13f763510763b3f25cebfb1b2887250e34b3c5.hip | // !!! This is a file automatically generated by hipify!!!
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "column_filter.h"
namespace filter
{
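    // Explicit instantiation: compiles the float-to-float column filter variant
    // into this translation unit so other objects can link against it.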
template void linearColumn<float, float>(PtrStepSzb src, PtrStepSzb dst, const float* kernel, int ksize, int anchor, int brd_type, int cc, hipStream_t stream);
}
#endif /* CUDA_DISABLER */
| 0b13f763510763b3f25cebfb1b2887250e34b3c5.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "column_filter.h"
namespace filter
{
template void linearColumn<float, float>(PtrStepSzb src, PtrStepSzb dst, const float* kernel, int ksize, int anchor, int brd_type, int cc, cudaStream_t stream);
}
#endif /* CUDA_DISABLER */
|
b025ae0b124af5a047138030e124c5d49fafe0d1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#include <optix.h>
#include "optixMultiGPU.h"
#include <sutil/vec_math.h>
#include <cuda/helpers.h>
#include <cuda/random.h>
extern "C" {
__constant__ Params params;
}
//------------------------------------------------------------------------------
//
//
//
//------------------------------------------------------------------------------
struct RadiancePRD
{
// TODO: move some state directly into payload registers?
float3 emitted;
float3 radiance;
float3 attenuation;
float3 origin;
float3 direction;
unsigned int seed;
int countEmitted;
int done;
int pad;
};
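// Small orthonormal-basis helper: builds tangent and binormal axes around a
// given normal so hemisphere samples in local space can be rotated into world space.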
struct Onb
{
__forceinline__ __device__ Onb(const float3& normal)
{
m_normal = normal;
if( fabs(m_normal.x) > fabs(m_normal.z) )
{
m_binormal.x = -m_normal.y;
m_binormal.y = m_normal.x;
m_binormal.z = 0;
}
else
{
m_binormal.x = 0;
m_binormal.y = -m_normal.z;
m_binormal.z = m_normal.y;
}
m_binormal = normalize(m_binormal);
m_tangent = cross( m_binormal, m_normal );
}
__forceinline__ __device__ void inverse_transform(float3& p) const
{
p = p.x*m_tangent + p.y*m_binormal + p.z*m_normal;
}
float3 m_tangent;
float3 m_binormal;
float3 m_normal;
};
//------------------------------------------------------------------------------
//
//
//
//------------------------------------------------------------------------------
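// The per-ray payload (a RadiancePRD*) is passed through optixTrace as two
// 32-bit payload registers; packPointer/unpackPointer split and reassemble the
// 64-bit address.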
static __forceinline__ __device__ void* unpackPointer( unsigned int i0, unsigned int i1 )
{
const unsigned long long uptr = static_cast<unsigned long long>( i0 ) << 32 | i1;
void* ptr = reinterpret_cast<void*>( uptr );
return ptr;
}
static __forceinline__ __device__ void packPointer( void* ptr, unsigned int& i0, unsigned int& i1 )
{
const unsigned long long uptr = reinterpret_cast<unsigned long long>( ptr );
i0 = uptr >> 32;
i1 = uptr & 0x00000000ffffffff;
}
static __forceinline__ __device__ RadiancePRD* getPRD()
{
const unsigned int u0 = optixGetPayload_0();
const unsigned int u1 = optixGetPayload_1();
return reinterpret_cast<RadiancePRD*>( unpackPointer( u0, u1 ) );
}
static __forceinline__ __device__ void setPayloadOcclusion( bool occluded )
{
optixSetPayload_0( static_cast<unsigned int>( occluded ) );
}
static __forceinline__ __device__ void cosine_sample_hemisphere(const float u1, const float u2, float3& p)
{
// Uniformly sample disk.
const float r = sqrtf( u1 );
const float phi = 2.0f*M_PIf * u2;
p.x = r * cosf( phi );
p.y = r * sinf( phi );
// Project up to hemisphere.
p.z = sqrtf( fmaxf( 0.0f, 1.0f - p.x*p.x - p.y*p.y ) );
}
static __forceinline__ __device__ void traceRadiance(
OptixTraversableHandle handle,
float3 ray_origin,
float3 ray_direction,
float tmin,
float tmax,
RadiancePRD* prd
)
{
// TODO: deduce stride from num ray-types passed in params
unsigned int u0, u1;
packPointer( prd, u0, u1 );
optixTrace(
handle,
ray_origin,
ray_direction,
tmin,
tmax,
0.0f, // rayTime
OptixVisibilityMask( 1 ),
OPTIX_RAY_FLAG_NONE,
RAY_TYPE_RADIANCE, // SBT offset
RAY_TYPE_COUNT, // SBT stride
RAY_TYPE_RADIANCE, // missSBTIndex
u0, u1 );
}
static __forceinline__ __device__ bool traceOcclusion(
OptixTraversableHandle handle,
float3 ray_origin,
float3 ray_direction,
float tmin,
float tmax
)
{
unsigned int occluded = 0u;
optixTrace(
handle,
ray_origin,
ray_direction,
tmin,
tmax,
0.0f, // rayTime
OptixVisibilityMask( 1 ),
OPTIX_RAY_FLAG_TERMINATE_ON_FIRST_HIT,
RAY_TYPE_OCCLUSION, // SBT offset
RAY_TYPE_COUNT, // SBT stride
RAY_TYPE_OCCLUSION, // missSBTIndex
occluded );
return occluded;
}
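// Per-device tint added to the final image so the tiles rendered by each GPU
// can be told apart in this multi-GPU sample.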
__forceinline__ __device__ float3 deviceColor( unsigned int idx )
{
return make_float3(
idx == 0 ? 0.05f : 0.0f,
idx == 1 ? 0.05f : 0.0f,
idx == 2 ? 0.05f : 0.0f
);
}
//------------------------------------------------------------------------------
//
//
//------------------------------------------------------------------------------
extern "C" __global__ void __raygen__rg()
{
const int w = params.width;
const int h = params.height;
const uint3 launch_idx = optixGetLaunchIndex();
const int2 pixel_idx = params.sample_index_buffer[ launch_idx.x ];
// Work distribution might assign tiles that cross over image boundary
if( pixel_idx.x > w-1 || pixel_idx.y > h-1 )
return;
const float3 eye = params.eye;
const float3 U = params.U;
const float3 V = params.V;
const float3 W = params.W;
const int subframe_index = params.subframe_index;
unsigned int seed = tea<4>( pixel_idx.y*w + pixel_idx.x, subframe_index );
float3 result = make_float3( 0.0f );
int i = params.samples_per_launch;
do
{
// The center of each pixel is at fraction (0.5,0.5)
const float2 subpixel_jitter = make_float2( rnd( seed ), rnd( seed ) );
const float2 d = 2.0f * make_float2(
( static_cast<float>( pixel_idx.x ) + subpixel_jitter.x ) / static_cast<float>( w ),
( static_cast<float>( pixel_idx.y ) + subpixel_jitter.y ) / static_cast<float>( h )
) - 1.0f;
float3 ray_direction = normalize(d.x*U + d.y*V + W);
float3 ray_origin = eye;
RadiancePRD prd;
prd.emitted = make_float3(0.f);
prd.radiance = make_float3(0.f);
prd.attenuation = make_float3(1.f);
prd.countEmitted = true;
prd.done = false;
prd.seed = seed;
int depth = 0;
for( ;; )
{
traceRadiance(
params.handle,
ray_origin,
ray_direction,
0.01f, // tmin // TODO: smarter offset
1e16f, // tmax
&prd );
result += prd.emitted;
result += prd.radiance * prd.attenuation;
if( prd.done || depth >= 3 ) // TODO RR, variable for depth
break;
ray_origin = prd.origin;
ray_direction = prd.direction;
++depth;
}
}
while( --i );
float3 accum_color = result / static_cast<float>( params.samples_per_launch );
if( subframe_index > 0 )
{
const float a = 1.0f / static_cast<float>( subframe_index+1 );
const float3 accum_color_prev = make_float3( params.sample_accum_buffer[ launch_idx.x ]);
accum_color = lerp( accum_color_prev, accum_color, a );
}
params.sample_accum_buffer [ launch_idx.x ] = make_float4( accum_color, 1.0f);
const unsigned int image_index = pixel_idx.y * params.width + pixel_idx.x;
params.result_buffer[ image_index ] = make_color ( accum_color + deviceColor( params.device_idx ) );
}
extern "C" __global__ void __miss__radiance()
{
MissData* rt_data = reinterpret_cast<MissData*>( optixGetSbtDataPointer() );
RadiancePRD* prd = getPRD();
prd->radiance = make_float3( rt_data->r, rt_data->g, rt_data->b );
prd->done = true;
}
extern "C" __global__ void __closesthit__occlusion()
{
setPayloadOcclusion( true );
}
extern "C" __global__ void __closesthit__radiance()
{
HitGroupData* rt_data = (HitGroupData*)optixGetSbtDataPointer();
const int prim_idx = optixGetPrimitiveIndex();
const float3 ray_dir = optixGetWorldRayDirection();
const int vert_idx_offset = prim_idx*3;
const float3 v0 = make_float3( rt_data->vertices[ vert_idx_offset+0 ] );
const float3 v1 = make_float3( rt_data->vertices[ vert_idx_offset+1 ] );
const float3 v2 = make_float3( rt_data->vertices[ vert_idx_offset+2 ] );
const float3 N_0 = normalize( cross( v1-v0, v2-v0 ) );
const float3 N = faceforward( N_0, -ray_dir, N_0 );
const float3 P = optixGetWorldRayOrigin() + optixGetRayTmax()*ray_dir;
RadiancePRD* prd = getPRD();
if( prd->countEmitted )
prd->emitted = rt_data->emission_color;
else
prd->emitted = make_float3( 0.0f );
unsigned int seed = prd->seed;
{
const float z1 = rnd(seed);
const float z2 = rnd(seed);
float3 w_in;
cosine_sample_hemisphere( z1, z2, w_in );
Onb onb( N );
onb.inverse_transform( w_in );
prd->direction = w_in;
prd->origin = P;
prd->attenuation *= rt_data->diffuse_color;
prd->countEmitted = false;
}
const float z1 = rnd(seed);
const float z2 = rnd(seed);
prd->seed = seed;
ParallelogramLight light = params.light;
const float3 light_pos = light.corner + light.v1 * z1 + light.v2 * z2;
// Calculate properties of light sample (for area based pdf)
const float Ldist = length(light_pos - P );
const float3 L = normalize(light_pos - P );
const float nDl = dot( N, L );
const float LnDl = -dot( light.normal, L );
float weight = 0.0f;
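    // weight is the geometry term of the area-light sample:
    // cos(theta_surface) * cos(theta_light) * lightArea / (pi * dist^2);
    // it stays zero if the light faces away or the sample point is occluded.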
if( nDl > 0.0f && LnDl > 0.0f )
{
const bool occluded = traceOcclusion(
params.handle,
P,
L,
0.01f, // tmin
Ldist - 0.01f // tmax
);
if( !occluded )
{
const float A = length(cross(light.v1, light.v2));
weight = nDl * LnDl * A / (M_PIf * Ldist * Ldist);
}
}
prd->radiance += light.emission * weight;
}
| b025ae0b124af5a047138030e124c5d49fafe0d1.cu | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#include <optix.h>
#include "optixMultiGPU.h"
#include <sutil/vec_math.h>
#include <cuda/helpers.h>
#include <cuda/random.h>
extern "C" {
__constant__ Params params;
}
//------------------------------------------------------------------------------
//
//
//
//------------------------------------------------------------------------------
struct RadiancePRD
{
// TODO: move some state directly into payload registers?
float3 emitted;
float3 radiance;
float3 attenuation;
float3 origin;
float3 direction;
unsigned int seed;
int countEmitted;
int done;
int pad;
};
struct Onb
{
__forceinline__ __device__ Onb(const float3& normal)
{
m_normal = normal;
if( fabs(m_normal.x) > fabs(m_normal.z) )
{
m_binormal.x = -m_normal.y;
m_binormal.y = m_normal.x;
m_binormal.z = 0;
}
else
{
m_binormal.x = 0;
m_binormal.y = -m_normal.z;
m_binormal.z = m_normal.y;
}
m_binormal = normalize(m_binormal);
m_tangent = cross( m_binormal, m_normal );
}
__forceinline__ __device__ void inverse_transform(float3& p) const
{
p = p.x*m_tangent + p.y*m_binormal + p.z*m_normal;
}
float3 m_tangent;
float3 m_binormal;
float3 m_normal;
};
//------------------------------------------------------------------------------
//
//
//
//------------------------------------------------------------------------------
static __forceinline__ __device__ void* unpackPointer( unsigned int i0, unsigned int i1 )
{
const unsigned long long uptr = static_cast<unsigned long long>( i0 ) << 32 | i1;
void* ptr = reinterpret_cast<void*>( uptr );
return ptr;
}
static __forceinline__ __device__ void packPointer( void* ptr, unsigned int& i0, unsigned int& i1 )
{
const unsigned long long uptr = reinterpret_cast<unsigned long long>( ptr );
i0 = uptr >> 32;
i1 = uptr & 0x00000000ffffffff;
}
static __forceinline__ __device__ RadiancePRD* getPRD()
{
const unsigned int u0 = optixGetPayload_0();
const unsigned int u1 = optixGetPayload_1();
return reinterpret_cast<RadiancePRD*>( unpackPointer( u0, u1 ) );
}
static __forceinline__ __device__ void setPayloadOcclusion( bool occluded )
{
optixSetPayload_0( static_cast<unsigned int>( occluded ) );
}
static __forceinline__ __device__ void cosine_sample_hemisphere(const float u1, const float u2, float3& p)
{
// Uniformly sample disk.
const float r = sqrtf( u1 );
const float phi = 2.0f*M_PIf * u2;
p.x = r * cosf( phi );
p.y = r * sinf( phi );
// Project up to hemisphere.
p.z = sqrtf( fmaxf( 0.0f, 1.0f - p.x*p.x - p.y*p.y ) );
}
static __forceinline__ __device__ void traceRadiance(
OptixTraversableHandle handle,
float3 ray_origin,
float3 ray_direction,
float tmin,
float tmax,
RadiancePRD* prd
)
{
// TODO: deduce stride from num ray-types passed in params
unsigned int u0, u1;
packPointer( prd, u0, u1 );
optixTrace(
handle,
ray_origin,
ray_direction,
tmin,
tmax,
0.0f, // rayTime
OptixVisibilityMask( 1 ),
OPTIX_RAY_FLAG_NONE,
RAY_TYPE_RADIANCE, // SBT offset
RAY_TYPE_COUNT, // SBT stride
RAY_TYPE_RADIANCE, // missSBTIndex
u0, u1 );
}
static __forceinline__ __device__ bool traceOcclusion(
OptixTraversableHandle handle,
float3 ray_origin,
float3 ray_direction,
float tmin,
float tmax
)
{
unsigned int occluded = 0u;
optixTrace(
handle,
ray_origin,
ray_direction,
tmin,
tmax,
0.0f, // rayTime
OptixVisibilityMask( 1 ),
OPTIX_RAY_FLAG_TERMINATE_ON_FIRST_HIT,
RAY_TYPE_OCCLUSION, // SBT offset
RAY_TYPE_COUNT, // SBT stride
RAY_TYPE_OCCLUSION, // missSBTIndex
occluded );
return occluded;
}
__forceinline__ __device__ float3 deviceColor( unsigned int idx )
{
return make_float3(
idx == 0 ? 0.05f : 0.0f,
idx == 1 ? 0.05f : 0.0f,
idx == 2 ? 0.05f : 0.0f
);
}
//------------------------------------------------------------------------------
//
//
//------------------------------------------------------------------------------
extern "C" __global__ void __raygen__rg()
{
const int w = params.width;
const int h = params.height;
const uint3 launch_idx = optixGetLaunchIndex();
const int2 pixel_idx = params.sample_index_buffer[ launch_idx.x ];
// Work distribution might assign tiles that cross over image boundary
if( pixel_idx.x > w-1 || pixel_idx.y > h-1 )
return;
const float3 eye = params.eye;
const float3 U = params.U;
const float3 V = params.V;
const float3 W = params.W;
const int subframe_index = params.subframe_index;
unsigned int seed = tea<4>( pixel_idx.y*w + pixel_idx.x, subframe_index );
float3 result = make_float3( 0.0f );
int i = params.samples_per_launch;
do
{
// The center of each pixel is at fraction (0.5,0.5)
const float2 subpixel_jitter = make_float2( rnd( seed ), rnd( seed ) );
const float2 d = 2.0f * make_float2(
( static_cast<float>( pixel_idx.x ) + subpixel_jitter.x ) / static_cast<float>( w ),
( static_cast<float>( pixel_idx.y ) + subpixel_jitter.y ) / static_cast<float>( h )
) - 1.0f;
float3 ray_direction = normalize(d.x*U + d.y*V + W);
float3 ray_origin = eye;
RadiancePRD prd;
prd.emitted = make_float3(0.f);
prd.radiance = make_float3(0.f);
prd.attenuation = make_float3(1.f);
prd.countEmitted = true;
prd.done = false;
prd.seed = seed;
int depth = 0;
for( ;; )
{
traceRadiance(
params.handle,
ray_origin,
ray_direction,
0.01f, // tmin // TODO: smarter offset
1e16f, // tmax
&prd );
result += prd.emitted;
result += prd.radiance * prd.attenuation;
if( prd.done || depth >= 3 ) // TODO RR, variable for depth
break;
ray_origin = prd.origin;
ray_direction = prd.direction;
++depth;
}
}
while( --i );
float3 accum_color = result / static_cast<float>( params.samples_per_launch );
if( subframe_index > 0 )
{
const float a = 1.0f / static_cast<float>( subframe_index+1 );
const float3 accum_color_prev = make_float3( params.sample_accum_buffer[ launch_idx.x ]);
accum_color = lerp( accum_color_prev, accum_color, a );
}
params.sample_accum_buffer [ launch_idx.x ] = make_float4( accum_color, 1.0f);
const unsigned int image_index = pixel_idx.y * params.width + pixel_idx.x;
params.result_buffer[ image_index ] = make_color ( accum_color + deviceColor( params.device_idx ) );
}
extern "C" __global__ void __miss__radiance()
{
MissData* rt_data = reinterpret_cast<MissData*>( optixGetSbtDataPointer() );
RadiancePRD* prd = getPRD();
prd->radiance = make_float3( rt_data->r, rt_data->g, rt_data->b );
prd->done = true;
}
extern "C" __global__ void __closesthit__occlusion()
{
setPayloadOcclusion( true );
}
extern "C" __global__ void __closesthit__radiance()
{
HitGroupData* rt_data = (HitGroupData*)optixGetSbtDataPointer();
const int prim_idx = optixGetPrimitiveIndex();
const float3 ray_dir = optixGetWorldRayDirection();
const int vert_idx_offset = prim_idx*3;
const float3 v0 = make_float3( rt_data->vertices[ vert_idx_offset+0 ] );
const float3 v1 = make_float3( rt_data->vertices[ vert_idx_offset+1 ] );
const float3 v2 = make_float3( rt_data->vertices[ vert_idx_offset+2 ] );
const float3 N_0 = normalize( cross( v1-v0, v2-v0 ) );
const float3 N = faceforward( N_0, -ray_dir, N_0 );
const float3 P = optixGetWorldRayOrigin() + optixGetRayTmax()*ray_dir;
RadiancePRD* prd = getPRD();
if( prd->countEmitted )
prd->emitted = rt_data->emission_color;
else
prd->emitted = make_float3( 0.0f );
unsigned int seed = prd->seed;
{
const float z1 = rnd(seed);
const float z2 = rnd(seed);
float3 w_in;
cosine_sample_hemisphere( z1, z2, w_in );
Onb onb( N );
onb.inverse_transform( w_in );
prd->direction = w_in;
prd->origin = P;
prd->attenuation *= rt_data->diffuse_color;
prd->countEmitted = false;
}
const float z1 = rnd(seed);
const float z2 = rnd(seed);
prd->seed = seed;
ParallelogramLight light = params.light;
const float3 light_pos = light.corner + light.v1 * z1 + light.v2 * z2;
// Calculate properties of light sample (for area based pdf)
const float Ldist = length(light_pos - P );
const float3 L = normalize(light_pos - P );
const float nDl = dot( N, L );
const float LnDl = -dot( light.normal, L );
float weight = 0.0f;
if( nDl > 0.0f && LnDl > 0.0f )
{
const bool occluded = traceOcclusion(
params.handle,
P,
L,
0.01f, // tmin
Ldist - 0.01f // tmax
);
if( !occluded )
{
const float A = length(cross(light.v1, light.v2));
weight = nDl * LnDl * A / (M_PIf * Ldist * Ldist);
}
}
prd->radiance += light.emission * weight;
}
|
8e2d9373adbdae2d35ba007098f6aa9909ae7883.hip | // !!! This is a file automatically generated by hipify!!!
/*******************************************************
* Copyright (c) 2014, ArrayFire
* All rights reserved.
*
* This file is distributed under 3-clause BSD license.
* The complete license agreement can be obtained at:
* http://arrayfire.com/licenses/BSD-3-Clause
********************************************************/
#include <af/dim4.hpp>
#include <af/defines.h>
#include <ArrayInfo.hpp>
#include <Array.hpp>
#include <fft.hpp>
#include <err_cuda.hpp>
#include <hipfft.h>
#include <hip/hip_complex.h>
#include <string>
#include <cstdio>
using af::dim4;
using std::string;
namespace cuda
{
// cuFFTPlanner will do very basic plan caching.
// It looks for the required candidate in the mHandles array and returns it if found.
// Otherwise, it creates a plan, stores it at mAvailSlotIndex, and increments the slot
// index in circular fashion from 0 to MAX_PLAN_CACHE - 1, wrapping back to zero.
class cuFFTPlanner
{
friend void find_cufft_plan(hipfftHandle &plan, int rank, int *n,
int *inembed, int istride, int idist,
int *onembed, int ostride, int odist,
hipfftType type, int batch);
public:
static cuFFTPlanner& getInstance() {
static cuFFTPlanner single_instance;
return single_instance;
}
private:
cuFFTPlanner() : mAvailSlotIndex(0) {}
cuFFTPlanner(cuFFTPlanner const&);
void operator=(cuFFTPlanner const&);
static const int MAX_PLAN_CACHE = 5;
int mAvailSlotIndex;
hipfftHandle mHandles[MAX_PLAN_CACHE];
string mKeys[MAX_PLAN_CACHE];
};
void find_cufft_plan(hipfftHandle &plan, int rank, int *n,
int *inembed, int istride, int idist,
int *onembed, int ostride, int odist,
hipfftType type, int batch)
{
cuFFTPlanner &planner = cuFFTPlanner::getInstance();
// create the key string
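    // The key encodes rank, transform sizes, the optional inembed/onembed layouts
    // with their strides and distances, and the type:batch pair, so only fully
    // identical plan requests map to the same cache entry.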
char key_str_temp[64];
sprintf(key_str_temp, "%d:", rank);
string key_string(key_str_temp);
for(int r=0; r<rank; ++r) {
sprintf(key_str_temp, "%d:", n[r]);
key_string.append(std::string(key_str_temp));
}
if (inembed!=NULL) {
for(int r=0; r<rank; ++r) {
sprintf(key_str_temp, "%d:", inembed[r]);
key_string.append(std::string(key_str_temp));
}
sprintf(key_str_temp, "%d:%d:", istride, idist);
key_string.append(std::string(key_str_temp));
}
if (onembed!=NULL) {
for(int r=0; r<rank; ++r) {
sprintf(key_str_temp, "%d:", onembed[r]);
key_string.append(std::string(key_str_temp));
}
sprintf(key_str_temp, "%d:%d:", ostride, odist);
key_string.append(std::string(key_str_temp));
}
sprintf(key_str_temp, "%d:%d", (int)type, batch);
key_string.append(std::string(key_str_temp));
// find the matching plan_index in the array cuFFTPlanner::mKeys
int plan_index = -1;
for (int i=0; i<cuFFTPlanner::MAX_PLAN_CACHE; ++i) {
if (key_string==planner.mKeys[i]) {
plan_index = i;
break;
}
}
// return mHandles[plan_index] if plan_index valid
if (plan_index!=-1) {
plan = planner.mHandles[plan_index];
return;
}
// otherwise create a new plan and set it at mAvailSlotIndex
// and finally set it to output plan variable
int slot_index = planner.mAvailSlotIndex;
hipfftResult res= hipfftDestroy(planner.mHandles[slot_index]);
    if (res==HIPFFT_SUCCESS || res==HIPFFT_INVALID_PLAN) {
hipfftHandle temp;
hipfftResult res = hipfftPlanMany(&temp, rank, n, inembed, istride, idist, onembed, ostride, odist, type, batch);
switch(res) {
case HIPFFT_ALLOC_FAILED : AF_ERROR("cuFFTPlanMany: cuFFT GPU resource allocation failed" , AF_ERR_INTERNAL);
case HIPFFT_INVALID_VALUE : AF_ERROR("cuFFTPlanMany: invalid parameters passed to cuFFT API" , AF_ERR_INTERNAL);
case HIPFFT_INTERNAL_ERROR: AF_ERROR("cuFFTPlanMany: internal driver detected using cuFFT" , AF_ERR_INTERNAL);
case HIPFFT_SETUP_FAILED : AF_ERROR("cuFFTPlanMany: cuFFT library initialization failed" , AF_ERR_INTERNAL);
case HIPFFT_INVALID_SIZE : AF_ERROR("cuFFTPlanMany: invalid size parameters passed to cuFFT", AF_ERR_INTERNAL);
default: //HIPFFT_SUCCESS
{
plan = temp;
planner.mHandles[slot_index] = temp;
planner.mKeys[slot_index] = key_string;
planner.mAvailSlotIndex = (slot_index + 1)%cuFFTPlanner::MAX_PLAN_CACHE;
}
break;
}
} else
AF_ERROR("cuFFTDestroy call failed", AF_ERR_INTERNAL);
}
template<typename T>
struct cufft_transform;
#define CUFFT_FUNC(T, TRANSFORM_TYPE) \
template<> \
struct cufft_transform<T> \
{ \
    enum { type = HIPFFT_##TRANSFORM_TYPE };             \
    hipfftResult                                                  \
    operator() (hipfftHandle plan, T *in, T *out, int dir) {       \
        return hipfftExec##TRANSFORM_TYPE(plan, in, out, dir);      \
} \
};
CUFFT_FUNC(cfloat , C2C)
CUFFT_FUNC(cdouble, Z2Z)
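// cufftPlanMany expects transform sizes with the slowest-varying dimension
// first, while af::dim4 stores the fastest-varying dimension first, so
// computeDims reverses the order of the requested dimensions.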
template<int rank>
void computeDims(int *rdims, const dim4 &idims)
{
if (rank==3) {
rdims[0] = idims[2];
rdims[1] = idims[1];
rdims[2] = idims[0];
} else if(rank==2) {
rdims[0] = idims[1];
rdims[1] = idims[0];
} else {
rdims[0] = idims[0];
}
}
template<typename T, int rank, int direction>
void cufft_common(Array<T> &arr)
{
const dim4 dims = arr.dims();
const dim4 strides = arr.strides();
int rank_dims[3];
switch(rank) {
case 1: computeDims<1>(rank_dims, dims); break;
case 2: computeDims<2>(rank_dims, dims); break;
case 3: computeDims<3>(rank_dims, dims); break;
}
hipfftHandle plan;
find_cufft_plan(plan, rank, rank_dims,
NULL, strides[0], strides[rank],
NULL, strides[0], strides[rank],
(hipfftType)cufft_transform<T>::type, dims[rank]);
cufft_transform<T> transform;
transform(plan, arr.get(), arr.get(), direction);
}
template<int rank>
void computePaddedDims(dim4 &pdims, dim_type const * const pad)
{
if (rank==1) {
pdims[0] = pad[0];
} else if (rank==2) {
pdims[0] = pad[0];
pdims[1] = pad[1];
} else if (rank==3) {
pdims[0] = pad[0];
pdims[1] = pad[1];
pdims[2] = pad[2];
}
}
template<typename T> T zero() { return 0; }
template<> cfloat zero<cfloat>() { return make_hipFloatComplex(0.0f, 0.0f); }
template<> cdouble zero<cdouble>() { return make_hipDoubleComplex(0.0, 0.0); }
template<typename inType, typename outType, int rank, bool isR2C>
Array<outType> * fft(Array<inType> const &in, double normalize, dim_type const npad, dim_type const * const pad)
{
ARG_ASSERT(1, (in.isOwner()==true));
dim4 pdims(1);
switch(rank) {
case 1 : computePaddedDims<1>(pdims, pad); break;
case 2 : computePaddedDims<2>(pdims, pad); break;
case 3 : computePaddedDims<3>(pdims, pad); break;
default: AF_ERROR("invalid rank", AF_ERR_SIZE);
}
pdims[rank] = in.dims()[rank];
Array<outType> *ret = createPaddedArray<inType, outType>(in, (npad>0 ? pdims : in.dims()), zero<outType>(), normalize);
cufft_common<outType, rank, HIPFFT_FORWARD>(*ret);
return ret;
}
template<typename T, int rank>
Array<T> * ifft(Array<T> const &in, double normalize, dim_type const npad, dim_type const * const pad)
{
ARG_ASSERT(1, (in.isOwner()==true));
dim4 pdims(1);
switch(rank) {
case 1 : computePaddedDims<1>(pdims, pad); break;
case 2 : computePaddedDims<2>(pdims, pad); break;
case 3 : computePaddedDims<3>(pdims, pad); break;
default: AF_ERROR("invalid rank", AF_ERR_SIZE);
}
pdims[rank] = in.dims()[rank];
Array<T> *ret = createPaddedArray<T, T>(in, (npad>0 ? pdims : in.dims()), zero<T>(), normalize);
cufft_common<T, rank, HIPFFT_BACKWARD>(*ret);
return ret;
}
#define INSTANTIATE1(T1, T2)\
template Array<T2> * fft <T1, T2, 1, true >(const Array<T1> &in, double normalize, dim_type const npad, dim_type const * const pad); \
template Array<T2> * fft <T1, T2, 2, true >(const Array<T1> &in, double normalize, dim_type const npad, dim_type const * const pad); \
template Array<T2> * fft <T1, T2, 3, true >(const Array<T1> &in, double normalize, dim_type const npad, dim_type const * const pad);
INSTANTIATE1(float , cfloat )
INSTANTIATE1(double , cdouble)
#define INSTANTIATE2(T)\
template Array<T> * fft <T, T, 1, false>(const Array<T> &in, double normalize, dim_type const npad, dim_type const * const pad); \
template Array<T> * fft <T, T, 2, false>(const Array<T> &in, double normalize, dim_type const npad, dim_type const * const pad); \
template Array<T> * fft <T, T, 3, false>(const Array<T> &in, double normalize, dim_type const npad, dim_type const * const pad); \
template Array<T> * ifft<T, 1>(const Array<T> &in, double normalize, dim_type const npad, dim_type const * const pad); \
template Array<T> * ifft<T, 2>(const Array<T> &in, double normalize, dim_type const npad, dim_type const * const pad); \
template Array<T> * ifft<T, 3>(const Array<T> &in, double normalize, dim_type const npad, dim_type const * const pad);
INSTANTIATE2(cfloat )
INSTANTIATE2(cdouble)
}
| 8e2d9373adbdae2d35ba007098f6aa9909ae7883.cu | /*******************************************************
* Copyright (c) 2014, ArrayFire
* All rights reserved.
*
* This file is distributed under 3-clause BSD license.
* The complete license agreement can be obtained at:
* http://arrayfire.com/licenses/BSD-3-Clause
********************************************************/
#include <af/dim4.hpp>
#include <af/defines.h>
#include <ArrayInfo.hpp>
#include <Array.hpp>
#include <fft.hpp>
#include <err_cuda.hpp>
#include <cufft.h>
#include <cuComplex.h>
#include <string>
#include <cstdio>
using af::dim4;
using std::string;
namespace cuda
{
// cuFFTPlanner will do very basic plan caching.
// It looks for the required candidate in the mHandles array and returns it if found.
// Otherwise, it creates a plan, stores it at mAvailSlotIndex, and increments the slot
// index in circular fashion from 0 to MAX_PLAN_CACHE - 1, wrapping back to zero.
class cuFFTPlanner
{
friend void find_cufft_plan(cufftHandle &plan, int rank, int *n,
int *inembed, int istride, int idist,
int *onembed, int ostride, int odist,
cufftType type, int batch);
public:
static cuFFTPlanner& getInstance() {
static cuFFTPlanner single_instance;
return single_instance;
}
private:
cuFFTPlanner() : mAvailSlotIndex(0) {}
cuFFTPlanner(cuFFTPlanner const&);
void operator=(cuFFTPlanner const&);
static const int MAX_PLAN_CACHE = 5;
int mAvailSlotIndex;
cufftHandle mHandles[MAX_PLAN_CACHE];
string mKeys[MAX_PLAN_CACHE];
};
void find_cufft_plan(cufftHandle &plan, int rank, int *n,
int *inembed, int istride, int idist,
int *onembed, int ostride, int odist,
cufftType type, int batch)
{
cuFFTPlanner &planner = cuFFTPlanner::getInstance();
// create the key string
char key_str_temp[64];
sprintf(key_str_temp, "%d:", rank);
string key_string(key_str_temp);
for(int r=0; r<rank; ++r) {
sprintf(key_str_temp, "%d:", n[r]);
key_string.append(std::string(key_str_temp));
}
if (inembed!=NULL) {
for(int r=0; r<rank; ++r) {
sprintf(key_str_temp, "%d:", inembed[r]);
key_string.append(std::string(key_str_temp));
}
sprintf(key_str_temp, "%d:%d:", istride, idist);
key_string.append(std::string(key_str_temp));
}
if (onembed!=NULL) {
for(int r=0; r<rank; ++r) {
sprintf(key_str_temp, "%d:", onembed[r]);
key_string.append(std::string(key_str_temp));
}
sprintf(key_str_temp, "%d:%d:", ostride, odist);
key_string.append(std::string(key_str_temp));
}
sprintf(key_str_temp, "%d:%d", (int)type, batch);
key_string.append(std::string(key_str_temp));
// find the matching plan_index in the array cuFFTPlanner::mKeys
int plan_index = -1;
for (int i=0; i<cuFFTPlanner::MAX_PLAN_CACHE; ++i) {
if (key_string==planner.mKeys[i]) {
plan_index = i;
break;
}
}
// return mHandles[plan_index] if plan_index valid
if (plan_index!=-1) {
plan = planner.mHandles[plan_index];
return;
}
// otherwise create a new plan and set it at mAvailSlotIndex
// and finally set it to output plan variable
int slot_index = planner.mAvailSlotIndex;
cufftResult res= cufftDestroy(planner.mHandles[slot_index]);
    if (res==CUFFT_SUCCESS || res==CUFFT_INVALID_PLAN) {
cufftHandle temp;
cufftResult res = cufftPlanMany(&temp, rank, n, inembed, istride, idist, onembed, ostride, odist, type, batch);
switch(res) {
case CUFFT_ALLOC_FAILED : AF_ERROR("cuFFTPlanMany: cuFFT GPU resource allocation failed" , AF_ERR_INTERNAL);
case CUFFT_INVALID_VALUE : AF_ERROR("cuFFTPlanMany: invalid parameters passed to cuFFT API" , AF_ERR_INTERNAL);
case CUFFT_INTERNAL_ERROR: AF_ERROR("cuFFTPlanMany: internal driver detected using cuFFT" , AF_ERR_INTERNAL);
case CUFFT_SETUP_FAILED : AF_ERROR("cuFFTPlanMany: cuFFT library initialization failed" , AF_ERR_INTERNAL);
case CUFFT_INVALID_SIZE : AF_ERROR("cuFFTPlanMany: invalid size parameters passed to cuFFT", AF_ERR_INTERNAL);
default: //CUFFT_SUCCESS
{
plan = temp;
planner.mHandles[slot_index] = temp;
planner.mKeys[slot_index] = key_string;
planner.mAvailSlotIndex = (slot_index + 1)%cuFFTPlanner::MAX_PLAN_CACHE;
}
break;
}
} else
AF_ERROR("cuFFTDestroy call failed", AF_ERR_INTERNAL);
}
template<typename T>
struct cufft_transform;
#define CUFFT_FUNC(T, TRANSFORM_TYPE) \
template<> \
struct cufft_transform<T> \
{ \
enum { type = CUFFT_##TRANSFORM_TYPE }; \
cufftResult \
operator() (cufftHandle plan, T *in, T *out, int dir) { \
return cufftExec##TRANSFORM_TYPE(plan, in, out, dir); \
} \
};
CUFFT_FUNC(cfloat , C2C)
CUFFT_FUNC(cdouble, Z2Z)
template<int rank>
void computeDims(int *rdims, const dim4 &idims)
{
if (rank==3) {
rdims[0] = idims[2];
rdims[1] = idims[1];
rdims[2] = idims[0];
} else if(rank==2) {
rdims[0] = idims[1];
rdims[1] = idims[0];
} else {
rdims[0] = idims[0];
}
}
template<typename T, int rank, int direction>
void cufft_common(Array<T> &arr)
{
const dim4 dims = arr.dims();
const dim4 strides = arr.strides();
int rank_dims[3];
switch(rank) {
case 1: computeDims<1>(rank_dims, dims); break;
case 2: computeDims<2>(rank_dims, dims); break;
case 3: computeDims<3>(rank_dims, dims); break;
}
cufftHandle plan;
find_cufft_plan(plan, rank, rank_dims,
NULL, strides[0], strides[rank],
NULL, strides[0], strides[rank],
(cufftType)cufft_transform<T>::type, dims[rank]);
cufft_transform<T> transform;
transform(plan, arr.get(), arr.get(), direction);
}
template<int rank>
void computePaddedDims(dim4 &pdims, dim_type const * const pad)
{
if (rank==1) {
pdims[0] = pad[0];
} else if (rank==2) {
pdims[0] = pad[0];
pdims[1] = pad[1];
} else if (rank==3) {
pdims[0] = pad[0];
pdims[1] = pad[1];
pdims[2] = pad[2];
}
}
template<typename T> T zero() { return 0; }
template<> cfloat zero<cfloat>() { return make_cuFloatComplex(0.0f, 0.0f); }
template<> cdouble zero<cdouble>() { return make_cuDoubleComplex(0.0, 0.0); }
template<typename inType, typename outType, int rank, bool isR2C>
Array<outType> * fft(Array<inType> const &in, double normalize, dim_type const npad, dim_type const * const pad)
{
ARG_ASSERT(1, (in.isOwner()==true));
dim4 pdims(1);
switch(rank) {
case 1 : computePaddedDims<1>(pdims, pad); break;
case 2 : computePaddedDims<2>(pdims, pad); break;
case 3 : computePaddedDims<3>(pdims, pad); break;
default: AF_ERROR("invalid rank", AF_ERR_SIZE);
}
pdims[rank] = in.dims()[rank];
Array<outType> *ret = createPaddedArray<inType, outType>(in, (npad>0 ? pdims : in.dims()), zero<outType>(), normalize);
cufft_common<outType, rank, CUFFT_FORWARD>(*ret);
return ret;
}
template<typename T, int rank>
Array<T> * ifft(Array<T> const &in, double normalize, dim_type const npad, dim_type const * const pad)
{
ARG_ASSERT(1, (in.isOwner()==true));
dim4 pdims(1);
switch(rank) {
case 1 : computePaddedDims<1>(pdims, pad); break;
case 2 : computePaddedDims<2>(pdims, pad); break;
case 3 : computePaddedDims<3>(pdims, pad); break;
default: AF_ERROR("invalid rank", AF_ERR_SIZE);
}
pdims[rank] = in.dims()[rank];
Array<T> *ret = createPaddedArray<T, T>(in, (npad>0 ? pdims : in.dims()), zero<T>(), normalize);
cufft_common<T, rank, CUFFT_INVERSE>(*ret);
return ret;
}
#define INSTANTIATE1(T1, T2)\
template Array<T2> * fft <T1, T2, 1, true >(const Array<T1> &in, double normalize, dim_type const npad, dim_type const * const pad); \
template Array<T2> * fft <T1, T2, 2, true >(const Array<T1> &in, double normalize, dim_type const npad, dim_type const * const pad); \
template Array<T2> * fft <T1, T2, 3, true >(const Array<T1> &in, double normalize, dim_type const npad, dim_type const * const pad);
INSTANTIATE1(float , cfloat )
INSTANTIATE1(double , cdouble)
#define INSTANTIATE2(T)\
template Array<T> * fft <T, T, 1, false>(const Array<T> &in, double normalize, dim_type const npad, dim_type const * const pad); \
template Array<T> * fft <T, T, 2, false>(const Array<T> &in, double normalize, dim_type const npad, dim_type const * const pad); \
template Array<T> * fft <T, T, 3, false>(const Array<T> &in, double normalize, dim_type const npad, dim_type const * const pad); \
template Array<T> * ifft<T, 1>(const Array<T> &in, double normalize, dim_type const npad, dim_type const * const pad); \
template Array<T> * ifft<T, 2>(const Array<T> &in, double normalize, dim_type const npad, dim_type const * const pad); \
template Array<T> * ifft<T, 3>(const Array<T> &in, double normalize, dim_type const npad, dim_type const * const pad);
INSTANTIATE2(cfloat )
INSTANTIATE2(cdouble)
}
|
1532980a67bb305c3be80880f20dcfe66fc266fb.hip | // !!! This is a file automatically generated by hipify!!!
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include "tests.cuh"
int main(int argc, char *argv[]) {
if (argc < 3) {
printf("Usage:\n./main <seq1> <seq2>\n");
return 1;
}
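    // Each argument is first tried as a FASTA file; if read_fasta returns NULL,
    // the argument string itself is used as the raw sequence.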
char* seq1 = read_fasta(argv[1], 65536);
bool seq1_from_file = seq1 ? true : false;
char* seq1_actual = seq1_from_file ? seq1 : argv[1];
char* seq2 = read_fasta(argv[2], 65536);
bool seq2_from_file = seq2 ? true : false;
char* seq2_actual = seq2_from_file ? seq2 : argv[2];
unsigned long len1 = strlen(seq1_actual);
unsigned long len2 = strlen(seq2_actual);
char* d_seq1;
hipMallocManaged(&d_seq1, sizeof(char) * len1);
hipMemcpy(d_seq1, seq1_actual, len1, hipMemcpyHostToDevice);
char* d_seq2;
hipMallocManaged(&d_seq2, sizeof(char) * len2);
hipMemcpy(d_seq2, seq2_actual, len2, hipMemcpyHostToDevice);
hipStream_t stream;
hipStreamCreate(&stream);
run_sw(d_seq1, len1, d_seq2, len2, stream);
run_nw(d_seq1, len1, d_seq2, len2, stream);
run_sw_linear_parallel(d_seq1, len1, d_seq2, len2, stream);
run_sw_gotoh(d_seq1, len1, d_seq2, len2, stream);
run_sw_singleblock(d_seq1, len1, d_seq2, len2, stream);
hipFree(d_seq1);
hipFree(d_seq2);
hipStreamDestroy(stream);
if (seq1_from_file) free(seq1);
if (seq2_from_file) free(seq2);
return 0;
}
| 1532980a67bb305c3be80880f20dcfe66fc266fb.cu | #include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include "tests.cuh"
int main(int argc, char *argv[]) {
if (argc < 3) {
printf("Usage:\n./main <seq1> <seq2>\n");
return 1;
}
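// read_fasta presumably returns NULL when the argument is not a readable FASTA file;
// in that case the raw command-line argument itself is used as the sequence.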
char* seq1 = read_fasta(argv[1], 65536);
bool seq1_from_file = seq1 ? true : false;
char* seq1_actual = seq1_from_file ? seq1 : argv[1];
char* seq2 = read_fasta(argv[2], 65536);
bool seq2_from_file = seq2 ? true : false;
char* seq2_actual = seq2_from_file ? seq2 : argv[2];
unsigned long len1 = strlen(seq1_actual);
unsigned long len2 = strlen(seq2_actual);
char* d_seq1;
cudaMallocManaged(&d_seq1, sizeof(char) * len1);
cudaMemcpy(d_seq1, seq1_actual, len1, cudaMemcpyHostToDevice);
char* d_seq2;
cudaMallocManaged(&d_seq2, sizeof(char) * len2);
cudaMemcpy(d_seq2, seq2_actual, len2, cudaMemcpyHostToDevice);
cudaStream_t stream;
cudaStreamCreate(&stream);
run_sw(d_seq1, len1, d_seq2, len2, stream);
run_nw(d_seq1, len1, d_seq2, len2, stream);
run_sw_linear_parallel(d_seq1, len1, d_seq2, len2, stream);
run_sw_gotoh(d_seq1, len1, d_seq2, len2, stream);
run_sw_singleblock(d_seq1, len1, d_seq2, len2, stream);
cudaFree(d_seq1);
cudaFree(d_seq2);
cudaStreamDestroy(stream);
if (seq1_from_file) free(seq1);
if (seq2_from_file) free(seq2);
return 0;
}
|
87292eee8c8e5b26e3f72e85b8eae42a180a9571.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int dims_advec_mom_kernel2_z [4][2];
static int dims_advec_mom_kernel2_z_h [4][2] = {0};
//user function
__device__
inline void advec_mom_kernel2_z_gpu(ACC<double> &vel1,
const ACC<double> &node_mass_post,
const ACC<double> &node_mass_pre,
const ACC<double> &mom_flux) {
vel1(0,0,0) = ( vel1(0,0,0) * node_mass_pre(0,0,0) +
mom_flux(0,0,-1) - mom_flux(0,0,0) ) / node_mass_post(0,0,0);
}
__global__ void ops_advec_mom_kernel2_z(
double* __restrict arg0,
double* __restrict arg1,
double* __restrict arg2,
double* __restrict arg3,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel2_z[0][0] + idx_z * 1*1 * dims_advec_mom_kernel2_z[0][0] * dims_advec_mom_kernel2_z[0][1];
arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel2_z[1][0] + idx_z * 1*1 * dims_advec_mom_kernel2_z[1][0] * dims_advec_mom_kernel2_z[1][1];
arg2 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel2_z[2][0] + idx_z * 1*1 * dims_advec_mom_kernel2_z[2][0] * dims_advec_mom_kernel2_z[2][1];
arg3 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel2_z[3][0] + idx_z * 1*1 * dims_advec_mom_kernel2_z[3][0] * dims_advec_mom_kernel2_z[3][1];
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
ACC<double> argp0(dims_advec_mom_kernel2_z[0][0], dims_advec_mom_kernel2_z[0][1], arg0);
const ACC<double> argp1(dims_advec_mom_kernel2_z[1][0], dims_advec_mom_kernel2_z[1][1], arg1);
const ACC<double> argp2(dims_advec_mom_kernel2_z[2][0], dims_advec_mom_kernel2_z[2][1], arg2);
const ACC<double> argp3(dims_advec_mom_kernel2_z[3][0], dims_advec_mom_kernel2_z[3][1], arg3);
advec_mom_kernel2_z_gpu(argp0, argp1, argp2, argp3);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_advec_mom_kernel2_z(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) {
#else
void ops_par_loop_advec_mom_kernel2_z_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[4] = { arg0, arg1, arg2, arg3};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,4,range,137)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(137,"advec_mom_kernel2_z");
OPS_kernels[137].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[3];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 4,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
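// Refresh the constant-memory copy of the per-dat x/y sizes only when they differ from
// the cached host-side values, so the hipMemcpyToSymbol below is skipped on repeat calls.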
if (xdim0 != dims_advec_mom_kernel2_z_h[0][0] || ydim0 != dims_advec_mom_kernel2_z_h[0][1] || xdim1 != dims_advec_mom_kernel2_z_h[1][0] || ydim1 != dims_advec_mom_kernel2_z_h[1][1] || xdim2 != dims_advec_mom_kernel2_z_h[2][0] || ydim2 != dims_advec_mom_kernel2_z_h[2][1] || xdim3 != dims_advec_mom_kernel2_z_h[3][0] || ydim3 != dims_advec_mom_kernel2_z_h[3][1]) {
dims_advec_mom_kernel2_z_h[0][0] = xdim0;
dims_advec_mom_kernel2_z_h[0][1] = ydim0;
dims_advec_mom_kernel2_z_h[1][0] = xdim1;
dims_advec_mom_kernel2_z_h[1][1] = ydim1;
dims_advec_mom_kernel2_z_h[2][0] = xdim2;
dims_advec_mom_kernel2_z_h[2][1] = ydim2;
dims_advec_mom_kernel2_z_h[3][0] = xdim3;
dims_advec_mom_kernel2_z_h[3][1] = ydim3;
cutilSafeCall(hipMemcpyToSymbol( dims_advec_mom_kernel2_z, dims_advec_mom_kernel2_z_h, sizeof(dims_advec_mom_kernel2_z)));
}
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
char *p_a[4];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
(start[1] * args[2].stencil->stride[1]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
(start[1] * args[3].stencil->stride[1]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2]);
p_a[3] = (char *)args[3].data_d + base3;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 4);
ops_halo_exchanges(args,4,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[137].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
hipLaunchKernelGGL(( ops_advec_mom_kernel2_z), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],x_size, y_size, z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[137].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 4);
ops_set_halo_dirtybit3(&args[0],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[137].mpi_time += t2-t1;
OPS_kernels[137].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[137].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[137].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[137].transfer += ops_compute_transfer(dim, start, end, &arg3);
}
}
#ifdef OPS_LAZY
void ops_par_loop_advec_mom_kernel2_z(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 137;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 137;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 4;
desc->args = (ops_arg*)malloc(4*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->function = ops_par_loop_advec_mom_kernel2_z_execute;
if (OPS_diags > 1) {
ops_timing_realloc(137,"advec_mom_kernel2_z");
}
ops_enqueue_kernel(desc);
}
#endif
| 87292eee8c8e5b26e3f72e85b8eae42a180a9571.cu | //
// auto-generated by ops.py
//
__constant__ int dims_advec_mom_kernel2_z [4][2];
static int dims_advec_mom_kernel2_z_h [4][2] = {0};
//user function
__device__
inline void advec_mom_kernel2_z_gpu(ACC<double> &vel1,
const ACC<double> &node_mass_post,
const ACC<double> &node_mass_pre,
const ACC<double> &mom_flux) {
vel1(0,0,0) = ( vel1(0,0,0) * node_mass_pre(0,0,0) +
mom_flux(0,0,-1) - mom_flux(0,0,0) ) / node_mass_post(0,0,0);
}
__global__ void ops_advec_mom_kernel2_z(
double* __restrict arg0,
double* __restrict arg1,
double* __restrict arg2,
double* __restrict arg3,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel2_z[0][0] + idx_z * 1*1 * dims_advec_mom_kernel2_z[0][0] * dims_advec_mom_kernel2_z[0][1];
arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel2_z[1][0] + idx_z * 1*1 * dims_advec_mom_kernel2_z[1][0] * dims_advec_mom_kernel2_z[1][1];
arg2 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel2_z[2][0] + idx_z * 1*1 * dims_advec_mom_kernel2_z[2][0] * dims_advec_mom_kernel2_z[2][1];
arg3 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel2_z[3][0] + idx_z * 1*1 * dims_advec_mom_kernel2_z[3][0] * dims_advec_mom_kernel2_z[3][1];
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
ACC<double> argp0(dims_advec_mom_kernel2_z[0][0], dims_advec_mom_kernel2_z[0][1], arg0);
const ACC<double> argp1(dims_advec_mom_kernel2_z[1][0], dims_advec_mom_kernel2_z[1][1], arg1);
const ACC<double> argp2(dims_advec_mom_kernel2_z[2][0], dims_advec_mom_kernel2_z[2][1], arg2);
const ACC<double> argp3(dims_advec_mom_kernel2_z[3][0], dims_advec_mom_kernel2_z[3][1], arg3);
advec_mom_kernel2_z_gpu(argp0, argp1, argp2, argp3);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_advec_mom_kernel2_z(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) {
#else
void ops_par_loop_advec_mom_kernel2_z_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[4] = { arg0, arg1, arg2, arg3};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,4,range,137)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(137,"advec_mom_kernel2_z");
OPS_kernels[137].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[3];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 4,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
if (xdim0 != dims_advec_mom_kernel2_z_h[0][0] || ydim0 != dims_advec_mom_kernel2_z_h[0][1] || xdim1 != dims_advec_mom_kernel2_z_h[1][0] || ydim1 != dims_advec_mom_kernel2_z_h[1][1] || xdim2 != dims_advec_mom_kernel2_z_h[2][0] || ydim2 != dims_advec_mom_kernel2_z_h[2][1] || xdim3 != dims_advec_mom_kernel2_z_h[3][0] || ydim3 != dims_advec_mom_kernel2_z_h[3][1]) {
dims_advec_mom_kernel2_z_h[0][0] = xdim0;
dims_advec_mom_kernel2_z_h[0][1] = ydim0;
dims_advec_mom_kernel2_z_h[1][0] = xdim1;
dims_advec_mom_kernel2_z_h[1][1] = ydim1;
dims_advec_mom_kernel2_z_h[2][0] = xdim2;
dims_advec_mom_kernel2_z_h[2][1] = ydim2;
dims_advec_mom_kernel2_z_h[3][0] = xdim3;
dims_advec_mom_kernel2_z_h[3][1] = ydim3;
cutilSafeCall(cudaMemcpyToSymbol( dims_advec_mom_kernel2_z, dims_advec_mom_kernel2_z_h, sizeof(dims_advec_mom_kernel2_z)));
}
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
char *p_a[4];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
(start[1] * args[2].stencil->stride[1]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
(start[1] * args[3].stencil->stride[1]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2]);
p_a[3] = (char *)args[3].data_d + base3;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 4);
ops_halo_exchanges(args,4,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[137].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
ops_advec_mom_kernel2_z<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],x_size, y_size, z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[137].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 4);
ops_set_halo_dirtybit3(&args[0],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[137].mpi_time += t2-t1;
OPS_kernels[137].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[137].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[137].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[137].transfer += ops_compute_transfer(dim, start, end, &arg3);
}
}
#ifdef OPS_LAZY
void ops_par_loop_advec_mom_kernel2_z(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 137;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 137;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 4;
desc->args = (ops_arg*)malloc(4*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->function = ops_par_loop_advec_mom_kernel2_z_execute;
if (OPS_diags > 1) {
ops_timing_realloc(137,"advec_mom_kernel2_z");
}
ops_enqueue_kernel(desc);
}
#endif
|
bfa0f0f1809cd075aa76266d6d8edf28172d0b4c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright 2019 Stanford
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "model.h"
#include "cuda_helper.h"
LegionRuntime::Logger::Category log_mse("mse");
void FFModel::mse_loss(const std::string& pcname,
const Tensor& _logit,
const Tensor& _label,
const std::string& reduction)
{
AggrMode aggr = AGGR_MODE_NONE;
if (reduction == "sum")
aggr = AGGR_MODE_SUM;
else if (reduction == "average")
aggr = AGGR_MODE_AVG;
else
assert(reduction == "none");
MSELoss* op = new MSELoss(*this, pcname, _logit, _label, aggr);
layers.push_back(op);
}
MSELoss::MSELoss(FFModel& model,
const std::string& pcname,
const Tensor& _logit,
const Tensor& _label,
AggrMode _aggr)
: Op(pcname, _logit, _label), profiling(model.config.profiling),
aggr_mode(_aggr)
{
task_is = model.get_or_create_task_is(2/*numDim*/, pcname);
// Current assume 2D logit and label
assert(_logit.numDim == 2);
assert(_label.numDim == 2);
Context ctx = model.config.lg_ctx;
Runtime* runtime = model.config.lg_hlr;
Rect<2> part_rect = runtime->get_index_space_domain(ctx, task_is);
}
void MSELoss::init(const FFModel& model)
{}
void MSELoss::forward(const FFModel& model)
{
}
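// The two loss kernels below accumulate the squared-error training loss and top-1
// accuracy into PerfMetrics via atomics, iterating over samples with CUDA_KERNEL_LOOP:
// the first handles one-hot multi-category labels, the second a single scalar output.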
__global__
void multi_category_calc_loss(const float* logits,
const float* labels,
PerfMetrics* perf,
int batch_size,
int out_dim,
float scale)
{
assert(out_dim > 1);
CUDA_KERNEL_LOOP(b, batch_size)
{
float max_val = 0.0f;
int true_label = -1, my_label = -1;
for (int i = 0; i < out_dim; i++) {
if (logits[b*out_dim+i] > max_val) {
max_val = logits[b*out_dim+i];
my_label = i;
}
if (labels[b*out_dim+i] > 0.9f) {
assert(true_label == -1);
true_label = i;
}
}
assert(true_label >= 0);
for (int i = 0; i < out_dim; i++) {
float diff = logits[b*out_dim+i] - labels[b*out_dim+i];
atomicAdd(&(perf->train_loss), diff * diff);
}
atomicAdd(&(perf->train_all), 1);
if (true_label == my_label)
atomicAdd(&(perf->train_correct), 1);
}
}
__global__
void single_category_calc_loss(const float* logits,
const float* labels,
PerfMetrics* perf,
int batch_size,
int out_dim,
float scale)
{
assert(out_dim == 1);
CUDA_KERNEL_LOOP(b, batch_size)
{
float diff = logits[b] - labels[b];
atomicAdd(&(perf->train_loss), diff * diff);
atomicAdd(&(perf->train_all), 1);
if ((logits[b] < 0.5f) == (labels[b] < 0.5f))
atomicAdd(&(perf->train_correct), 1);
}
}
__global__
void mseloss_backward(float* logitsGrad,
const float* logits,
const float* labels,
float factor,
int size)
{
CUDA_KERNEL_LOOP(i, size)
{
logitsGrad[i] = factor * (logits[i] - labels[i]);
}
}
__host__
PerfMetrics MSELoss::backward_task(const Task *task,
const std::vector<PhysicalRegion>& regions,
Context ctx, Runtime* runtime)
{
assert(regions.size() == 3);
assert(task->regions.size() == 3);
const MSELoss* op = (MSELoss*) task->args;
TensorAccessorR<float, 2> accLogits(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
TensorAccessorR<float, 2> accLabels(
regions[1], task->regions[1], FID_DATA, ctx, runtime);
TensorAccessorW<float, 2> accLogitsGrad(
regions[2], task->regions[2], FID_DATA, ctx, runtime, false/*readOutput*/);
assert(accLogits.rect == accLabels.rect);
assert(accLogits.rect == accLogitsGrad.rect);
int batch_size = accLogits.rect.hi[1] - accLogits.rect.lo[1] + 1;
int out_dim = accLogits.rect.hi[0] - accLogits.rect.lo[0] + 1;
float scale = 1.0f;
switch (op->aggr_mode) {
case AGGR_MODE_SUM:
scale = 1.0f;
break;
case AGGR_MODE_AVG:
// Divided by the global batch size
scale = 1.0f / (op->inputs[0].adim[1]);
break;
default:
assert(false);
}
// Calculate loss
PerfMetrics* perf;
PerfMetrics perf_zc;
perf_zc.train_loss = 0.0f;
perf_zc.train_correct = perf_zc.train_all = 0;
perf_zc.test_correct = perf_zc.test_all = 0;
perf_zc.val_correct = perf_zc.val_all = 0;
checkCUDA(hipMalloc(&perf, sizeof(PerfMetrics)));
checkCUDA(hipMemcpy(perf, &perf_zc, sizeof(PerfMetrics), hipMemcpyHostToDevice));
if (out_dim == 1) {
hipLaunchKernelGGL(( single_category_calc_loss), dim3(GET_BLOCKS(batch_size)), dim3(CUDA_NUM_THREADS), 0, 0,
accLogits.ptr, accLabels.ptr, perf, batch_size, out_dim, scale);
} else {
hipLaunchKernelGGL(( multi_category_calc_loss), dim3(GET_BLOCKS(batch_size)), dim3(CUDA_NUM_THREADS), 0, 0,
accLogits.ptr, accLabels.ptr, perf, batch_size, out_dim, scale);
}
checkCUDA(hipMemcpy(&perf_zc, perf, sizeof(PerfMetrics), hipMemcpyDeviceToHost));
checkCUDA(hipFree(perf));
// Calculate backward
hipLaunchKernelGGL(( mseloss_backward), dim3(GET_BLOCKS(accLogits.rect.volume())), dim3(CUDA_NUM_THREADS), 0, 0,
accLogitsGrad.ptr, accLogits.ptr, accLabels.ptr,
scale, accLogits.rect.volume());
checkCUDA(hipDeviceSynchronize());
if (op->profiling) {
print_tensor<2, float>(accLabels.ptr, accLabels.rect, "[MSELoss:label]");
print_tensor<2, float>(accLogits.ptr, accLogits.rect, "[MSELoss:logit]");
print_tensor<2, float>(accLogitsGrad.ptr, accLogitsGrad.rect, "[MSELoss:logit_grad]");
}
return perf_zc;
}
void MSELoss::backward(const FFModel& model)
{
ArgumentMap argmap;
Context ctx = model.config.lg_ctx;
Runtime* runtime = model.config.lg_hlr;
IndexLauncher launcher(MSELOSS_BWD_TASK_ID, task_is,
TaskArgument(this, sizeof(MSELoss)), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(std::string(name)));
// regions[0]: _logit
launcher.add_region_requirement(
RegionRequirement(inputs[0].part, 0/*projection*/,
READ_ONLY, EXCLUSIVE, inputs[0].region));
launcher.add_field(0, FID_DATA);
// regions[1]: _label
launcher.add_region_requirement(
RegionRequirement(inputs[1].part, 0/*projection*/,
READ_ONLY, EXCLUSIVE, inputs[1].region));
launcher.add_field(1, FID_DATA);
// regions[2]: logit_grad
launcher.add_region_requirement(
RegionRequirement(inputs[0].part_grad, 0/*projection*/,
WRITE_ONLY, EXCLUSIVE, inputs[0].region_grad));
launcher.add_field(2, FID_DATA);
FutureMap new_metrics = runtime->execute_index_space(ctx, launcher);
// Update metrics
TaskLauncher metrics_task(UPDATE_METRICS_TASK_ID, TaskArgument(NULL, 0));
metrics_task.add_future(model.current_metrics);
Rect<2> part_rect = runtime->get_index_space_domain(ctx, task_is);
for (PointInRectIterator<2> it(part_rect); it(); it++) {
metrics_task.add_future(new_metrics[*it]);
}
((FFModel*)(&model))->current_metrics = runtime->execute_task(ctx, metrics_task);
}
| bfa0f0f1809cd075aa76266d6d8edf28172d0b4c.cu | /* Copyright 2019 Stanford
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "model.h"
#include "cuda_helper.h"
LegionRuntime::Logger::Category log_mse("mse");
void FFModel::mse_loss(const std::string& pcname,
const Tensor& _logit,
const Tensor& _label,
const std::string& reduction)
{
AggrMode aggr = AGGR_MODE_NONE;
if (reduction == "sum")
aggr = AGGR_MODE_SUM;
else if (reduction == "average")
aggr = AGGR_MODE_AVG;
else
assert(reduction == "none");
MSELoss* op = new MSELoss(*this, pcname, _logit, _label, aggr);
layers.push_back(op);
}
MSELoss::MSELoss(FFModel& model,
const std::string& pcname,
const Tensor& _logit,
const Tensor& _label,
AggrMode _aggr)
: Op(pcname, _logit, _label), profiling(model.config.profiling),
aggr_mode(_aggr)
{
task_is = model.get_or_create_task_is(2/*numDim*/, pcname);
// Current assume 2D logit and label
assert(_logit.numDim == 2);
assert(_label.numDim == 2);
Context ctx = model.config.lg_ctx;
Runtime* runtime = model.config.lg_hlr;
Rect<2> part_rect = runtime->get_index_space_domain(ctx, task_is);
}
void MSELoss::init(const FFModel& model)
{}
void MSELoss::forward(const FFModel& model)
{
}
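// The two loss kernels below accumulate the squared-error training loss and top-1
// accuracy into PerfMetrics via atomics, iterating over samples with CUDA_KERNEL_LOOP:
// the first handles one-hot multi-category labels, the second a single scalar output.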
__global__
void multi_category_calc_loss(const float* logits,
const float* labels,
PerfMetrics* perf,
int batch_size,
int out_dim,
float scale)
{
assert(out_dim > 1);
CUDA_KERNEL_LOOP(b, batch_size)
{
float max_val = 0.0f;
int true_label = -1, my_label = -1;
for (int i = 0; i < out_dim; i++) {
if (logits[b*out_dim+i] > max_val) {
max_val = logits[b*out_dim+i];
my_label = i;
}
if (labels[b*out_dim+i] > 0.9f) {
assert(true_label == -1);
true_label = i;
}
}
assert(true_label >= 0);
for (int i = 0; i < out_dim; i++) {
float diff = logits[b*out_dim+i] - labels[b*out_dim+i];
atomicAdd(&(perf->train_loss), diff * diff);
}
atomicAdd(&(perf->train_all), 1);
if (true_label == my_label)
atomicAdd(&(perf->train_correct), 1);
}
}
__global__
void single_category_calc_loss(const float* logits,
const float* labels,
PerfMetrics* perf,
int batch_size,
int out_dim,
float scale)
{
assert(out_dim == 1);
CUDA_KERNEL_LOOP(b, batch_size)
{
float diff = logits[b] - labels[b];
atomicAdd(&(perf->train_loss), diff * diff);
atomicAdd(&(perf->train_all), 1);
if ((logits[b] < 0.5f) == (labels[b] < 0.5f))
atomicAdd(&(perf->train_correct), 1);
}
}
__global__
void mseloss_backward(float* logitsGrad,
const float* logits,
const float* labels,
float factor,
int size)
{
CUDA_KERNEL_LOOP(i, size)
{
logitsGrad[i] = factor * (logits[i] - labels[i]);
}
}
__host__
PerfMetrics MSELoss::backward_task(const Task *task,
const std::vector<PhysicalRegion>& regions,
Context ctx, Runtime* runtime)
{
assert(regions.size() == 3);
assert(task->regions.size() == 3);
const MSELoss* op = (MSELoss*) task->args;
TensorAccessorR<float, 2> accLogits(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
TensorAccessorR<float, 2> accLabels(
regions[1], task->regions[1], FID_DATA, ctx, runtime);
TensorAccessorW<float, 2> accLogitsGrad(
regions[2], task->regions[2], FID_DATA, ctx, runtime, false/*readOutput*/);
assert(accLogits.rect == accLabels.rect);
assert(accLogits.rect == accLogitsGrad.rect);
int batch_size = accLogits.rect.hi[1] - accLogits.rect.lo[1] + 1;
int out_dim = accLogits.rect.hi[0] - accLogits.rect.lo[0] + 1;
float scale = 1.0f;
switch (op->aggr_mode) {
case AGGR_MODE_SUM:
scale = 1.0f;
break;
case AGGR_MODE_AVG:
// Divided by the global batch size
scale = 1.0f / (op->inputs[0].adim[1]);
break;
default:
assert(false);
}
// Calculate loss
PerfMetrics* perf;
PerfMetrics perf_zc;
perf_zc.train_loss = 0.0f;
perf_zc.train_correct = perf_zc.train_all = 0;
perf_zc.test_correct = perf_zc.test_all = 0;
perf_zc.val_correct = perf_zc.val_all = 0;
checkCUDA(cudaMalloc(&perf, sizeof(PerfMetrics)));
checkCUDA(cudaMemcpy(perf, &perf_zc, sizeof(PerfMetrics), cudaMemcpyHostToDevice));
if (out_dim == 1) {
single_category_calc_loss<<<GET_BLOCKS(batch_size), CUDA_NUM_THREADS>>>(
accLogits.ptr, accLabels.ptr, perf, batch_size, out_dim, scale);
} else {
multi_category_calc_loss<<<GET_BLOCKS(batch_size), CUDA_NUM_THREADS>>>(
accLogits.ptr, accLabels.ptr, perf, batch_size, out_dim, scale);
}
checkCUDA(cudaMemcpy(&perf_zc, perf, sizeof(PerfMetrics), cudaMemcpyDeviceToHost));
checkCUDA(cudaFree(perf));
// Calculate backward
mseloss_backward<<<GET_BLOCKS(accLogits.rect.volume()), CUDA_NUM_THREADS>>>(
accLogitsGrad.ptr, accLogits.ptr, accLabels.ptr,
scale, accLogits.rect.volume());
checkCUDA(cudaDeviceSynchronize());
if (op->profiling) {
print_tensor<2, float>(accLabels.ptr, accLabels.rect, "[MSELoss:label]");
print_tensor<2, float>(accLogits.ptr, accLogits.rect, "[MSELoss:logit]");
print_tensor<2, float>(accLogitsGrad.ptr, accLogitsGrad.rect, "[MSELoss:logit_grad]");
}
return perf_zc;
}
void MSELoss::backward(const FFModel& model)
{
ArgumentMap argmap;
Context ctx = model.config.lg_ctx;
Runtime* runtime = model.config.lg_hlr;
IndexLauncher launcher(MSELOSS_BWD_TASK_ID, task_is,
TaskArgument(this, sizeof(MSELoss)), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(std::string(name)));
// regions[0]: _logit
launcher.add_region_requirement(
RegionRequirement(inputs[0].part, 0/*projection*/,
READ_ONLY, EXCLUSIVE, inputs[0].region));
launcher.add_field(0, FID_DATA);
// regions[1]: _label
launcher.add_region_requirement(
RegionRequirement(inputs[1].part, 0/*projection*/,
READ_ONLY, EXCLUSIVE, inputs[1].region));
launcher.add_field(1, FID_DATA);
// regions[2]: logit_grad
launcher.add_region_requirement(
RegionRequirement(inputs[0].part_grad, 0/*projection*/,
WRITE_ONLY, EXCLUSIVE, inputs[0].region_grad));
launcher.add_field(2, FID_DATA);
FutureMap new_metrics = runtime->execute_index_space(ctx, launcher);
// Update metrics
TaskLauncher metrics_task(UPDATE_METRICS_TASK_ID, TaskArgument(NULL, 0));
metrics_task.add_future(model.current_metrics);
Rect<2> part_rect = runtime->get_index_space_domain(ctx, task_is);
for (PointInRectIterator<2> it(part_rect); it(); it++) {
metrics_task.add_future(new_metrics[*it]);
}
((FFModel*)(&model))->current_metrics = runtime->execute_task(ctx, metrics_task);
}
|
162eea145f628d442c0eca4832290c154b54d0fa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THCS_GENERIC_FILE
#define THCS_GENERIC_FILE "generic/THCSTensor.cu"
#else
#include "THHThrustAllocator.cuh"
#include "THHTensor.hpp"
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/gather.h>
#include <thrust/generate.h>
#include <thrust/scan.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <thrust/transform.h>
#include <thrust/unique.h>
#if TORCH_HIP_VERSION >= 7000
#include <thrust/system/hip/execution_policy.h>
#endif
#define I_INFO(tensor) getTensorInfo<int64_t, THCIndexTensor, uint64_t>(state, tensor)
#define V_INFO(tensor) getTensorInfo<real, THCTensor, uint64_t>(state, tensor)
THCTensor *THCSTensor_(toDense)(THCState *state, THCSTensor *self) {
THLongStorage *size;
THCTensor *dst;
// set up the new tensor
size = THCSTensor_(newSizeOf)(state, self);
dst = THCTensor_(newWithSize)(state, size, NULL);
THLongStorage_free(size);
THCTensor_(zero)(state, dst);
real one = ScalarConvert<int, real>::to(1);
THCSTensor_(spcadd)(state, dst, dst, one, self);
THCudaCheck(hipGetLastError());
return dst;
}
THCSTensor *THCSTensor_(newCoalesce)(THCState *state, THCSTensor *self) {
ptrdiff_t nnz = self->nnz;
if (nnz < 2) {
self->coalesced = 1;
}
if (self->coalesced) {
THCSTensor_(retain)(state, self);
return self;
}
#if TORCH_HIP_VERSION >= 7000
THCThrustAllocator thrustAlloc(state);
#define THRUST_EXEC(fn, ...) fn(thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)), ##__VA_ARGS__)
#else
#define THRUST_EXEC(fn, ...) fn(##__VA_ARGS__)
#endif
// For indices, a simple sort + unique suffices
// For values, we use a custom kernel for segmented reduction (can't use Thrust due to indirection).
THCTensor *values_ = THCSTensor_(newValues)(state, self);
THCTensor *values = THCTensor_(newContiguous)(state, values_);
THCTensor_(free)(state, values_);
int nDimI = self->nDimensionI;
int64_t stride = values->stride[0];
hipStream_t stream = THCState_getCurrentStream(state);
// indices will be modified by Thrust, so we have to clone or use new storage
// here.
THCIndexTensor *indices1D = THCSTensor_(newFlattenedIndices)(state, self, 1);
THCIndexTensor *origIndices = THCIndexTensor_(newWithSize1d)(state, nnz);
THCIndexTensor *uniqueOffsets = THCIndexTensor_(newWithSize1d)(state, nnz);
typedef thrust::device_ptr<int64_t> thrust_ptr;
thrust_ptr indicesIter(THCIndexTensor_(data)(state, indices1D));
thrust_ptr origIndicesIter(THCIndexTensor_(data)(state, origIndices));
thrust_ptr uniqueOffsetsIter(THCIndexTensor_(data)(state, uniqueOffsets));
// Fill sortedOrigIndices with sequential indices
thrust::counting_iterator<int64_t> countIterI(TH_INDEX_BASE);
thrust::counting_iterator<int64_t> countIterO(TH_INDEX_BASE);
THRUST_EXEC(thrust::copy, countIterI, countIterI + nnz, origIndicesIter);
THRUST_EXEC(thrust::copy, countIterO, countIterO + nnz, uniqueOffsetsIter);
THRUST_EXEC(thrust::sort_by_key,
indicesIter, indicesIter + nnz,
origIndicesIter, ThrustLTOp<int64_t>()
);
// this forces device-host synchronization!
thrust::pair<thrust_ptr, thrust_ptr> newEnd = THRUST_EXEC(
thrust::unique_by_key,
indicesIter, indicesIter + nnz,
uniqueOffsetsIter
);
int64_t newNnz = newEnd.first - indicesIter;
THCIndexTensor_(resize2d)(state, indices1D, 1, newNnz);
THCTensor *newValues = THCTensor_(new)(state);
THCTensor_(resizeNd)(state, newValues, values->nDimension, values->size, NULL);
newValues->size[0] = newNnz;
dim3 grid(THCCeilDiv(newNnz, (int64_t) 4), THCCeilDiv(stride, (int64_t) 128));
dim3 block(32, 4);
hipLaunchKernelGGL(( THCSTensor_coalesceValuesKernel<real, accreal>), dim3(grid), dim3(block), 0, stream,
THCIndexTensor_(data)(state, uniqueOffsets),
THCIndexTensor_(data)(state, origIndices),
THCTensor_(data)(state, values),
THCTensor_(data)(state, newValues),
nnz,
newNnz,
stride
);
// this grid-strided version is slower but probably more flexible
// to different sizes
// int64_t blockX = min(stride, (int64_t) 512);
// dim3 block(blockX, 512 / blockX);
// int64_t grid = min((int64_t) 1024, THCCeilDiv((int64_t) newNnz * stride, (int64_t) block.x * block.y));
// THCSTensor_coalesceValuesKernel_gridStrided<real, accreal><<<grid, block, 0, stream>>>(
// THCIndexTensor_(data)(state, uniqueOffsets),
// THCIndexTensor_(data)(state, origIndices),
// THCTensor_(data)(state, values),
// THCTensor_(data)(state, newValues),
// nnz,
// newNnz,
// stride
// );
THCIndexTensor_(free)(state, origIndices);
THCIndexTensor_(free)(state, uniqueOffsets);
////////////////////////////////////////////////////////////
// unflatten indices if necessary
THCIndexTensor *newIndices;
if (nDimI == 1) {
newIndices = indices1D;
} else {
newIndices = THCIndexTensor_(newWithSize2d)(state, nDimI, newNnz);
THCIndexTensor *indicesSlice = THCIndexTensor_(new)(state);
if (TH_INDEX_BASE != 0) {
THCIndexTensor_(add)(state, indices1D, indices1D, -1);
}
for (int64_t d = nDimI - 1; d >= 0; d--) {
THCIndexTensor_(select)(state, indicesSlice, newIndices, 0, d);
THCIndexTensor_(copy)(state, indicesSlice, indices1D);
THCIndexTensor_(div)(state, indices1D, indices1D, self->size[d]);
THCIndexTensor_(cadd)(state, indicesSlice, indicesSlice, -self->size[d], indices1D);
}
if (TH_INDEX_BASE != 0) {
THCIndexTensor_(add)(state, newIndices, newIndices, 1);
}
THCIndexTensor_(free)(state, indices1D);
THCIndexTensor_(free)(state, indicesSlice);
}
////////////////////////////////////////////////////////////
THLongStorage *size = THCSTensor_(newSizeOf)(state, self);
THCSTensor *dst = THCSTensor_(newWithTensorAndSize)(state, newIndices, newValues, size);
THLongStorage_free(size);
THCTensor_(free)(state, values);
THCIndexTensor_(free)(state, newIndices);
THCTensor_(free)(state, newValues);
dst->coalesced = 1;
THCudaCheck(hipGetLastError());
return dst;
#undef THRUST_EXEC
}
// forceClone is intended to be used as a boolean; if set, the result is forced to
// be a clone of self.
THCIndexTensor* THCSTensor_(newFlattenedIndices)(THCState *state, THCSTensor *self, int forceClone) {
THCIndexTensor *indices = THCSTensor_(newIndices)(state, self);
int nDimI = self->nDimensionI;
if (nDimI == 1) {
if (forceClone) {
THCIndexTensor *indices_clone = THCIndexTensor_(newClone)(state, indices);
THCIndexTensor_(free)(state, indices);
return indices_clone;
} else {
return indices;
}
} else {
// FIXME TH_INDEX_BASE
int64_t factor = 1;
THCIndexTensor *indices1D = THCIndexTensor_(newWithSize2d)(state, 1, self->nnz);
THCIndexTensor_(fill)(state, indices1D, TH_INDEX_BASE);
THCIndexTensor *indicesSlice = THCIndexTensor_(new)(state);
for (int64_t d = nDimI - 1; d >= 0; d--) {
THCIndexTensor_(select)(state, indicesSlice, indices, 0, d);
THCIndexTensor_(cadd)(state, indices1D, indices1D, factor, indicesSlice);
if (TH_INDEX_BASE != 0) {
THCIndexTensor_(add)(state, indices1D, indices1D, -TH_INDEX_BASE);
}
factor *= self->size[d];
}
THCIndexTensor_(free)(state, indices);
THCIndexTensor_(free)(state, indicesSlice);
return indices1D;
}
}
// In place transpose
void THCSTensor_(transpose)(THCState *state, THCSTensor *self, int d1, int d2) {
int64_t nDimI = THCSTensor_(nDimensionI)(state, self);
int64_t nDimV = THCSTensor_(nDimensionV)(state, self);
THArgCheck(d1 < nDimI && d2 < nDimI, 1, "Transposed dimensions should be sparse. Got nDimI: %ld, d1: %ld, d2: %ld", nDimI, d1, d2);
THCIndexTensor *indices = THCSTensor_(newIndices)(state, self);
int64_t nnz = THCSTensor_(nnz)(state, self);
THCIndexTensor *buffer = THCIndexTensor_(newWithSize1d)(state, nnz);
THCIndexTensor *slice1 = THCIndexTensor_(newSelect)(state, indices, 0, d1);
THCIndexTensor *slice2 = THCIndexTensor_(newSelect)(state, indices, 0, d2);
THCIndexTensor_(copy)(state, buffer, slice1);
THCIndexTensor_(copy)(state, slice1, slice2);
THCIndexTensor_(copy)(state, slice2, buffer);
int64_t i = self->size[d1];
self->size[d1] = self->size[d2];
self->size[d2] = i;
THCIndexTensor_(free)(state, indices);
THCIndexTensor_(free)(state, buffer);
THCIndexTensor_(free)(state, slice1);
THCIndexTensor_(free)(state, slice2);
}
int THCSTensor_(getDevice)(THCState* state, const THCSTensor* tensor) {
if (!tensor->values || !tensor->values->storage) return -1;
return THCStorage_(getDevice)(state, tensor->values->storage);
}
#endif
| 162eea145f628d442c0eca4832290c154b54d0fa.cu | #ifndef THCS_GENERIC_FILE
#define THCS_GENERIC_FILE "generic/THCSTensor.cu"
#else
#include "THCThrustAllocator.cuh"
#include "THCTensor.hpp"
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/gather.h>
#include <thrust/generate.h>
#include <thrust/scan.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <thrust/transform.h>
#include <thrust/unique.h>
#if CUDA_VERSION >= 7000
#include <thrust/system/cuda/execution_policy.h>
#endif
#define I_INFO(tensor) getTensorInfo<int64_t, THCIndexTensor, uint64_t>(state, tensor)
#define V_INFO(tensor) getTensorInfo<real, THCTensor, uint64_t>(state, tensor)
THCTensor *THCSTensor_(toDense)(THCState *state, THCSTensor *self) {
THLongStorage *size;
THCTensor *dst;
// set up the new tensor
size = THCSTensor_(newSizeOf)(state, self);
dst = THCTensor_(newWithSize)(state, size, NULL);
THLongStorage_free(size);
THCTensor_(zero)(state, dst);
real one = ScalarConvert<int, real>::to(1);
THCSTensor_(spcadd)(state, dst, dst, one, self);
THCudaCheck(cudaGetLastError());
return dst;
}
THCSTensor *THCSTensor_(newCoalesce)(THCState *state, THCSTensor *self) {
ptrdiff_t nnz = self->nnz;
if (nnz < 2) {
self->coalesced = 1;
}
if (self->coalesced) {
THCSTensor_(retain)(state, self);
return self;
}
#if CUDA_VERSION >= 7000
THCThrustAllocator thrustAlloc(state);
#define THRUST_EXEC(fn, ...) fn(thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)), ##__VA_ARGS__)
#else
#define THRUST_EXEC(fn, ...) fn(##__VA_ARGS__)
#endif
// For indices, a simple sort + unique suffices
// For values, we use a custom kernel for segmented reduction (can't use Thrust due to indirection).
THCTensor *values_ = THCSTensor_(newValues)(state, self);
THCTensor *values = THCTensor_(newContiguous)(state, values_);
THCTensor_(free)(state, values_);
int nDimI = self->nDimensionI;
int64_t stride = values->stride[0];
cudaStream_t stream = THCState_getCurrentStream(state);
// indices will be modified by Thrust, so we have to clone or use new storage
// here.
THCIndexTensor *indices1D = THCSTensor_(newFlattenedIndices)(state, self, 1);
THCIndexTensor *origIndices = THCIndexTensor_(newWithSize1d)(state, nnz);
THCIndexTensor *uniqueOffsets = THCIndexTensor_(newWithSize1d)(state, nnz);
typedef thrust::device_ptr<int64_t> thrust_ptr;
thrust_ptr indicesIter(THCIndexTensor_(data)(state, indices1D));
thrust_ptr origIndicesIter(THCIndexTensor_(data)(state, origIndices));
thrust_ptr uniqueOffsetsIter(THCIndexTensor_(data)(state, uniqueOffsets));
// Fill sortedOrigIndices with sequential indices
thrust::counting_iterator<int64_t> countIterI(TH_INDEX_BASE);
thrust::counting_iterator<int64_t> countIterO(TH_INDEX_BASE);
THRUST_EXEC(thrust::copy, countIterI, countIterI + nnz, origIndicesIter);
THRUST_EXEC(thrust::copy, countIterO, countIterO + nnz, uniqueOffsetsIter);
THRUST_EXEC(thrust::sort_by_key,
indicesIter, indicesIter + nnz,
origIndicesIter, ThrustLTOp<int64_t>()
);
// this forces device-host synchronization!
thrust::pair<thrust_ptr, thrust_ptr> newEnd = THRUST_EXEC(
thrust::unique_by_key,
indicesIter, indicesIter + nnz,
uniqueOffsetsIter
);
int64_t newNnz = newEnd.first - indicesIter;
THCIndexTensor_(resize2d)(state, indices1D, 1, newNnz);
THCTensor *newValues = THCTensor_(new)(state);
THCTensor_(resizeNd)(state, newValues, values->nDimension, values->size, NULL);
newValues->size[0] = newNnz;
dim3 grid(THCCeilDiv(newNnz, (int64_t) 4), THCCeilDiv(stride, (int64_t) 128));
dim3 block(32, 4);
THCSTensor_coalesceValuesKernel<real, accreal><<<grid, block, 0, stream>>>(
THCIndexTensor_(data)(state, uniqueOffsets),
THCIndexTensor_(data)(state, origIndices),
THCTensor_(data)(state, values),
THCTensor_(data)(state, newValues),
nnz,
newNnz,
stride
);
// this grid-strided version is slower but probably more flexible
// to different sizes
// int64_t blockX = min(stride, (int64_t) 512);
// dim3 block(blockX, 512 / blockX);
// int64_t grid = min((int64_t) 1024, THCCeilDiv((int64_t) newNnz * stride, (int64_t) block.x * block.y));
// THCSTensor_coalesceValuesKernel_gridStrided<real, accreal><<<grid, block, 0, stream>>>(
// THCIndexTensor_(data)(state, uniqueOffsets),
// THCIndexTensor_(data)(state, origIndices),
// THCTensor_(data)(state, values),
// THCTensor_(data)(state, newValues),
// nnz,
// newNnz,
// stride
// );
THCIndexTensor_(free)(state, origIndices);
THCIndexTensor_(free)(state, uniqueOffsets);
////////////////////////////////////////////////////////////
// unflatten indices if necessary
THCIndexTensor *newIndices;
if (nDimI == 1) {
newIndices = indices1D;
} else {
newIndices = THCIndexTensor_(newWithSize2d)(state, nDimI, newNnz);
THCIndexTensor *indicesSlice = THCIndexTensor_(new)(state);
if (TH_INDEX_BASE != 0) {
THCIndexTensor_(add)(state, indices1D, indices1D, -1);
}
for (int64_t d = nDimI - 1; d >= 0; d--) {
THCIndexTensor_(select)(state, indicesSlice, newIndices, 0, d);
THCIndexTensor_(copy)(state, indicesSlice, indices1D);
THCIndexTensor_(div)(state, indices1D, indices1D, self->size[d]);
THCIndexTensor_(cadd)(state, indicesSlice, indicesSlice, -self->size[d], indices1D);
}
if (TH_INDEX_BASE != 0) {
THCIndexTensor_(add)(state, newIndices, newIndices, 1);
}
THCIndexTensor_(free)(state, indices1D);
THCIndexTensor_(free)(state, indicesSlice);
}
////////////////////////////////////////////////////////////
THLongStorage *size = THCSTensor_(newSizeOf)(state, self);
THCSTensor *dst = THCSTensor_(newWithTensorAndSize)(state, newIndices, newValues, size);
THLongStorage_free(size);
THCTensor_(free)(state, values);
THCIndexTensor_(free)(state, newIndices);
THCTensor_(free)(state, newValues);
dst->coalesced = 1;
THCudaCheck(cudaGetLastError());
return dst;
#undef THRUST_EXEC
}
// forceClone is intended to be used as a boolean; if set, the result is forced to
// be a clone of self.
THCIndexTensor* THCSTensor_(newFlattenedIndices)(THCState *state, THCSTensor *self, int forceClone) {
THCIndexTensor *indices = THCSTensor_(newIndices)(state, self);
int nDimI = self->nDimensionI;
if (nDimI == 1) {
if (forceClone) {
THCIndexTensor *indices_clone = THCIndexTensor_(newClone)(state, indices);
THCIndexTensor_(free)(state, indices);
return indices_clone;
} else {
return indices;
}
} else {
// FIXME TH_INDEX_BASE
int64_t factor = 1;
THCIndexTensor *indices1D = THCIndexTensor_(newWithSize2d)(state, 1, self->nnz);
THCIndexTensor_(fill)(state, indices1D, TH_INDEX_BASE);
THCIndexTensor *indicesSlice = THCIndexTensor_(new)(state);
for (int64_t d = nDimI - 1; d >= 0; d--) {
THCIndexTensor_(select)(state, indicesSlice, indices, 0, d);
THCIndexTensor_(cadd)(state, indices1D, indices1D, factor, indicesSlice);
if (TH_INDEX_BASE != 0) {
THCIndexTensor_(add)(state, indices1D, indices1D, -TH_INDEX_BASE);
}
factor *= self->size[d];
}
THCIndexTensor_(free)(state, indices);
THCIndexTensor_(free)(state, indicesSlice);
return indices1D;
}
}
// In place transpose
void THCSTensor_(transpose)(THCState *state, THCSTensor *self, int d1, int d2) {
int64_t nDimI = THCSTensor_(nDimensionI)(state, self);
int64_t nDimV = THCSTensor_(nDimensionV)(state, self);
THArgCheck(d1 < nDimI && d2 < nDimI, 1, "Transposed dimensions should be sparse. Got nDimI: %ld, d1: %ld, d2: %ld", nDimI, d1, d2);
THCIndexTensor *indices = THCSTensor_(newIndices)(state, self);
int64_t nnz = THCSTensor_(nnz)(state, self);
THCIndexTensor *buffer = THCIndexTensor_(newWithSize1d)(state, nnz);
THCIndexTensor *slice1 = THCIndexTensor_(newSelect)(state, indices, 0, d1);
THCIndexTensor *slice2 = THCIndexTensor_(newSelect)(state, indices, 0, d2);
THCIndexTensor_(copy)(state, buffer, slice1);
THCIndexTensor_(copy)(state, slice1, slice2);
THCIndexTensor_(copy)(state, slice2, buffer);
int64_t i = self->size[d1];
self->size[d1] = self->size[d2];
self->size[d2] = i;
THCIndexTensor_(free)(state, indices);
THCIndexTensor_(free)(state, buffer);
THCIndexTensor_(free)(state, slice1);
THCIndexTensor_(free)(state, slice2);
}
int THCSTensor_(getDevice)(THCState* state, const THCSTensor* tensor) {
if (!tensor->values || !tensor->values->storage) return -1;
return THCStorage_(getDevice)(state, tensor->values->storage);
}
#endif
|
22838fd854818da110c6ff5d9451a6536a87f27d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "CSI_utils.h"
#include <algorithm>
#include "cudaDeviceManager.h"
#include "complext.h"
#include <math_constants.h>
#include <stdio.h>
using namespace Gadgetron;
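// Naive per-point DFT kernels: dft_kernel projects the echo/time data onto the given
// frequencies and dftH_kernel applies the adjoint. The phase term uses dte*echo + dtt*sample,
// where dte appears to be the echo spacing and dtt the per-sample readout (dwell) time.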
template<class T> static __global__ void dft_kernel(complext<T>* __restrict__ kspace, const complext<T>* __restrict__ tspace, T* __restrict__ frequencies, unsigned int spiral_length, unsigned int echoes, unsigned int nfreqs,T dte, T dtt){
const int idx = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x;
if (idx < spiral_length*nfreqs ){
complext<T> result = 0;
T frequency = frequencies[idx/spiral_length];
T time_offset = dtt*(idx%spiral_length);
unsigned int kpoint = idx%spiral_length;
for (unsigned int i =0; i < echoes; i++){
result += exp(complext<T>(0,-frequency*2*CUDART_PI_F*(dte*i+time_offset)))*tspace[kpoint+i*spiral_length];
}
kspace[idx] = result;
}
}
template<class T> static __global__ void dftH_kernel(const complext<T>* __restrict__ kspace, complext<T>* __restrict__ tspace, T* __restrict__ frequencies, unsigned int spiral_length, unsigned int echoes, unsigned int nfreqs,T dte, T dtt){
const int idx = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x;
if (idx < spiral_length*echoes ){
complext<T> result = 0;
unsigned int kpoint = idx%spiral_length;
T timeshift = dte*(idx/spiral_length)+dtt*kpoint;
for (unsigned int i =0; i < nfreqs; i++){
result += exp(complext<T>(0,frequencies[i]*2*CUDART_PI_F*timeshift))*kspace[kpoint+i*spiral_length];
}
tspace[idx] = result;
}
}
template<class T>
void Gadgetron::CSI_dft(cuNDArray<complext<T> >* kspace,
cuNDArray<complext<T> >* tspace, thrust::device_vector<T>* frequencies, T dtt, T dte) {
size_t elements = kspace->get_size(0)*kspace->get_size(1);
size_t batches = kspace->get_number_of_elements()/elements;
size_t t_elements = tspace->get_size(0)*tspace->get_size(1);
for (int i = 0; i< batches; i++){
int threadsPerBlock = std::min<int>(elements,cudaDeviceManager::Instance()->max_blockdim());
dim3 dimBlock(threadsPerBlock);
int totalBlocksPerGrid = (elements+threadsPerBlock-1)/threadsPerBlock;
dim3 dimGrid(totalBlocksPerGrid);
if (totalBlocksPerGrid > cudaDeviceManager::Instance()->max_griddim())
throw std::runtime_error("CSIOperator: Input dimensions too large");
//size_t batchSize = dimGrid.x*dimBlock.x;
hipFuncSetCacheConfig(dft_kernel<T>,hipFuncCachePreferL1);
std::vector<size_t> dims = *tspace->get_dimensions();
// Invoke kernel
hipLaunchKernelGGL(( dft_kernel<T>), dim3(dimGrid), dim3(dimBlock), 0, 0, kspace->get_data_ptr()+i*elements,tspace->get_data_ptr()+i*t_elements,thrust::raw_pointer_cast(frequencies->data()),dims[0],dims[1], frequencies->size(),dte,dtt);
hipDeviceSynchronize();
CHECK_FOR_CUDA_ERROR();
}
}
template<class T>
void Gadgetron::CSI_dftH(cuNDArray<complext<T> >* kspace,
cuNDArray<complext<T> >* tspace, thrust::device_vector<T>* frequencies, T dtt, T dte) {
size_t k_elements = kspace->get_size(0)*kspace->get_size(1);
size_t elements = tspace->get_size(0)*tspace->get_size(1);
size_t batches = tspace->get_number_of_elements()/elements;
for (int i =0; i< batches; i++){
int threadsPerBlock = std::min<int>(elements,cudaDeviceManager::Instance()->max_blockdim());
dim3 dimBlock(threadsPerBlock);
int totalBlocksPerGrid = (elements+threadsPerBlock-1)/threadsPerBlock;
dim3 dimGrid(totalBlocksPerGrid);
if (totalBlocksPerGrid > cudaDeviceManager::Instance()->max_griddim())
throw std::runtime_error("CSIOperator: Input dimensions too large");
//size_t batchSize = dimGrid.x*dimBlock.x;
hipFuncSetCacheConfig(dftH_kernel<T>,hipFuncCachePreferL1);
std::vector<size_t> dims = *tspace->get_dimensions();
// Invoke kernel
hipLaunchKernelGGL(( dftH_kernel<T>), dim3(dimGrid), dim3(dimBlock), 0, 0, kspace->get_data_ptr()+i*k_elements,tspace->get_data_ptr()+i*elements,thrust::raw_pointer_cast(frequencies->data()),dims[0],dims[1], frequencies->size(),dte,dtt);
CHECK_FOR_CUDA_ERROR();
}
}
template EXPORTHYPER void Gadgetron::CSI_dft<float>(cuNDArray<float_complext>* kspace,cuNDArray<float_complext>* tspace, thrust::device_vector<float>* frequencies, float dtt, float dte);
template EXPORTHYPER void Gadgetron::CSI_dftH<float>(cuNDArray<float_complext>* kspace,cuNDArray<float_complext>* tspace, thrust::device_vector<float>* frequencies, float dtt, float dte);
| 22838fd854818da110c6ff5d9451a6536a87f27d.cu | #include "CSI_utils.h"
#include <algorithm>
#include "cudaDeviceManager.h"
#include "complext.h"
#include <math_constants.h>
#include <stdio.h>
using namespace Gadgetron;
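// Naive per-point DFT kernels: dft_kernel projects the echo/time data onto the given
// frequencies and dftH_kernel applies the adjoint. The phase term uses dte*echo + dtt*sample,
// where dte appears to be the echo spacing and dtt the per-sample readout (dwell) time.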
template<class T> static __global__ void dft_kernel(complext<T>* __restrict__ kspace, const complext<T>* __restrict__ tspace, T* __restrict__ frequencies, unsigned int spiral_length, unsigned int echoes, unsigned int nfreqs,T dte, T dtt){
const int idx = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x;
if (idx < spiral_length*nfreqs ){
complext<T> result = 0;
T frequency = frequencies[idx/spiral_length];
T time_offset = dtt*(idx%spiral_length);
unsigned int kpoint = idx%spiral_length;
for (unsigned int i =0; i < echoes; i++){
result += exp(complext<T>(0,-frequency*2*CUDART_PI_F*(dte*i+time_offset)))*tspace[kpoint+i*spiral_length];
}
kspace[idx] = result;
}
}
template<class T> static __global__ void dftH_kernel(const complext<T>* __restrict__ kspace, complext<T>* __restrict__ tspace, T* __restrict__ frequencies, unsigned int spiral_length, unsigned int echoes, unsigned int nfreqs,T dte, T dtt){
const int idx = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x;
if (idx < spiral_length*echoes ){
complext<T> result = 0;
unsigned int kpoint = idx%spiral_length;
T timeshift = dte*(idx/spiral_length)+dtt*kpoint;
for (unsigned int i =0; i < nfreqs; i++){
result += exp(complext<T>(0,frequencies[i]*2*CUDART_PI_F*timeshift))*kspace[kpoint+i*spiral_length];
}
tspace[idx] = result;
}
}
template<class T>
void Gadgetron::CSI_dft(cuNDArray<complext<T> >* kspace,
cuNDArray<complext<T> >* tspace, thrust::device_vector<T>* frequencies, T dtt, T dte) {
size_t elements = kspace->get_size(0)*kspace->get_size(1);
size_t batches = kspace->get_number_of_elements()/elements;
size_t t_elements = tspace->get_size(0)*tspace->get_size(1);
for (int i = 0; i< batches; i++){
int threadsPerBlock = std::min<int>(elements,cudaDeviceManager::Instance()->max_blockdim());
dim3 dimBlock(threadsPerBlock);
int totalBlocksPerGrid = (elements+threadsPerBlock-1)/threadsPerBlock;
dim3 dimGrid(totalBlocksPerGrid);
if (totalBlocksPerGrid > cudaDeviceManager::Instance()->max_griddim())
throw std::runtime_error("CSIOperator: Input dimensions too large");
//size_t batchSize = dimGrid.x*dimBlock.x;
cudaFuncSetCacheConfig(dft_kernel<T>,cudaFuncCachePreferL1);
std::vector<size_t> dims = *tspace->get_dimensions();
// Invoke kernel
dft_kernel<T><<<dimGrid, dimBlock>>>(kspace->get_data_ptr()+i*elements,tspace->get_data_ptr()+i*t_elements,thrust::raw_pointer_cast(frequencies->data()),dims[0],dims[1], frequencies->size(),dte,dtt);
cudaThreadSynchronize();
CHECK_FOR_CUDA_ERROR();
}
}
template<class T>
void Gadgetron::CSI_dftH(cuNDArray<complext<T> >* kspace,
cuNDArray<complext<T> >* tspace, thrust::device_vector<T>* frequencies, T dtt, T dte) {
size_t k_elements = kspace->get_size(0)*kspace->get_size(1);
size_t elements = tspace->get_size(0)*tspace->get_size(1);
size_t batches = tspace->get_number_of_elements()/elements;
for (int i =0; i< batches; i++){
int threadsPerBlock = std::min<int>(elements,cudaDeviceManager::Instance()->max_blockdim());
dim3 dimBlock(threadsPerBlock);
int totalBlocksPerGrid = (elements+threadsPerBlock-1)/threadsPerBlock;
dim3 dimGrid(totalBlocksPerGrid);
if (totalBlocksPerGrid > cudaDeviceManager::Instance()->max_griddim())
throw std::runtime_error("CSIOperator: Input dimensions too large");
//size_t batchSize = dimGrid.x*dimBlock.x;
cudaFuncSetCacheConfig(dftH_kernel<T>,cudaFuncCachePreferL1);
std::vector<size_t> dims = *tspace->get_dimensions();
// Invoke kernel
dftH_kernel<T><<<dimGrid, dimBlock>>>(kspace->get_data_ptr()+i*k_elements,tspace->get_data_ptr()+i*elements,thrust::raw_pointer_cast(frequencies->data()),dims[0],dims[1], frequencies->size(),dte,dtt);
CHECK_FOR_CUDA_ERROR();
}
}
template EXPORTHYPER void Gadgetron::CSI_dft<float>(cuNDArray<float_complext>* kspace,cuNDArray<float_complext>* tspace, thrust::device_vector<float>* frequencies, float dtt, float dte);
template EXPORTHYPER void Gadgetron::CSI_dftH<float>(cuNDArray<float_complext>* kspace,cuNDArray<float_complext>* tspace, thrust::device_vector<float>* frequencies, float dtt, float dte);
|
88367480addacbac5bb822f2a82be22474e28695.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "vectorSubtraction.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const double *A = NULL;
hipMalloc(&A, XSIZE*YSIZE);
const double *B = NULL;
hipMalloc(&B, XSIZE*YSIZE);
double *C = NULL;
hipMalloc(&C, XSIZE*YSIZE);
int numElements = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(vectorSubtraction, dim3(gridBlock), dim3(threadBlock), 0, 0, A, B, C, numElements);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(vectorSubtraction, dim3(gridBlock), dim3(threadBlock), 0, 0, A, B, C, numElements);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(vectorSubtraction, dim3(gridBlock), dim3(threadBlock), 0, 0, A, B, C, numElements);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 88367480addacbac5bb822f2a82be22474e28695.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "vectorSubtraction.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const double *A = NULL;
cudaMalloc(&A, XSIZE*YSIZE);
const double *B = NULL;
cudaMalloc(&B, XSIZE*YSIZE);
double *C = NULL;
cudaMalloc(&C, XSIZE*YSIZE);
int numElements = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
vectorSubtraction<<<gridBlock,threadBlock>>>(A,B,C,numElements);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
vectorSubtraction<<<gridBlock,threadBlock>>>(A,B,C,numElements);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
vectorSubtraction<<<gridBlock,threadBlock>>>(A,B,C,numElements);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
1110277c92c21398dd482a2f147bb134def2d075.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include "CudaContext.hh"
#include "CudaDevice.hh"
#include "utils.hh"
namespace CUDA {
Context::Context() {
}
Context::~Context() {
}
int Context::getNumDevices() const {
int numDevices;
hipError_t cec;
cec = hipGetDeviceCount( &numDevices ); CHKCERR( cec );
return numDevices;
}
}
| 1110277c92c21398dd482a2f147bb134def2d075.cu | #include <stdlib.h>
#include <cuda.h>
#include "CudaContext.hh"
#include "CudaDevice.hh"
#include "utils.hh"
namespace CUDA {
Context::Context() {
}
Context::~Context() {
}
int Context::getNumDevices() const {
int numDevices;
cudaError_t cec;
cec = cudaGetDeviceCount( &numDevices ); CHKCERR( cec );
return numDevices;
}
}
|
106aeab2a21daef48f33c3586f6bb222d7f3d983.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//#include "class.hpp"
//#include "force.hpp"
#include<FDPS_particle_simulator.hpp>
#include "cuda_pointer.h"
#include "force_gpu_cuda.hpp"
enum{
N_THREAD_GPU = 32,
N_WALK_LIMIT = 1000,
NI_LIMIT = N_WALK_LIMIT*1000,
NJ_LIMIT = N_WALK_LIMIT*10000,
};
struct EpiGPU{
float3 pos;
int id_walk;
};
struct EpjGPU{
float4 posm;
};
struct ForceGPU{
float4 accp;
};
inline __device__ float4 dev_gravity(
float eps2,
float3 ipos,
float4 jposm,
float4 accp)
{
float dx = jposm.x - ipos.x;
float dy = jposm.y - ipos.y;
float dz = jposm.z - ipos.z;
float r2 = eps2 + dx*dx + dy*dy + dz*dz;
float rinv = rsqrtf(r2);
float pij = jposm.w * rinv;
float mri3 = rinv*rinv * pij;
accp.x += mri3 * dx;
accp.y += mri3 * dy;
accp.z += mri3 * dz;
accp.w -= pij;
return accp;
}
#if 0
__global__ void ForceKernel(
const int2 * ij_disp,
const EpiGPU * epi,
const EpjGPU * epj,
ForceGPU * force,
const float eps2)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
const float3 ipos = epi[tid].pos;
const int j_head = ij_disp[epi[tid].id_walk ].y;
const int j_tail = ij_disp[epi[tid].id_walk+1].y;
float4 accp = make_float4(0.f, 0.f, 0.f, 0.f);
for(int j=j_head; j<j_tail; j++){
float4 jposm = epj[j].posm;
accp = dev_gravity(eps2, ipos, jposm, accp);
}
force[tid].accp = accp;
}
#else
__device__ float4 ForceKernel_1walk(
float4 *jpsh,
const float3 ipos,
const int id_walk,
const int2 *ij_disp,
const EpjGPU *epj,
float4 accp,
const float eps2)
{
const int tid = threadIdx.x;
const int j_head = ij_disp[id_walk ].y;
const int j_tail = ij_disp[id_walk+1].y;
for(int j=j_head; j<j_tail; j+=N_THREAD_GPU){
// __syncthreads();
jpsh[tid] = ((float4 *)(epj + j)) [tid];
// __syncthreads();
if(j_tail-j < N_THREAD_GPU){
for(int jj=0; jj<j_tail-j; jj++){
accp = dev_gravity(eps2, ipos, jpsh[jj], accp);
}
}else{
#pragma unroll
for(int jj=0; jj<N_THREAD_GPU; jj++){
accp = dev_gravity(eps2, ipos, jpsh[jj], accp);
}
}
}
return accp;
}
__device__ float4 ForceKernel_2walk(
float4 jpsh[2][N_THREAD_GPU],
const float3 ipos,
const int id_walk,
const int iwalk0,
const int iwalk1,
const int2 *ij_disp,
const EpjGPU *epj,
float4 accp,
const float eps2)
{
const int jbeg0 = ij_disp[iwalk0].y;
const int jbeg1 = ij_disp[iwalk1].y;
const int jend0 = ij_disp[iwalk0 + 1].y;
const int jend1 = ij_disp[iwalk1 + 1].y;
const int nj0 = jend0 - jbeg0;
const int nj1 = jend1 - jbeg1;
const int nj_longer = nj0 > nj1 ? nj0 : nj1;
const int nj_shorter = nj0 > nj1 ? nj1 : nj0;
const int walk_longer= nj0 > nj1 ? 0 : 1;
const int jbeg_longer = nj0 > nj1 ? jbeg0 : jbeg1;
const int mywalk = id_walk==iwalk0 ? 0 : 1;
const int tid = threadIdx.x;
for(int j=0; j<nj_shorter; j+=N_THREAD_GPU){
jpsh[0][tid] = ((float4 *)(epj + jbeg0 + j)) [tid];
jpsh[1][tid] = ((float4 *)(epj + jbeg1 + j)) [tid];
if(nj_shorter-j < N_THREAD_GPU){
for(int jj=0; jj<nj_shorter-j; jj++){
accp = dev_gravity(eps2, ipos, jpsh[mywalk][jj], accp);
}
}else{
#pragma unroll
for(int jj=0; jj<N_THREAD_GPU; jj++){
accp = dev_gravity(eps2, ipos, jpsh[mywalk][jj], accp);
}
}
}
for(int j=nj_shorter; j<nj_longer; j+=N_THREAD_GPU){
jpsh[0][tid] = ((float4 *)(epj + jbeg_longer + j)) [tid];
int jrem = nj_longer - j;
if(jrem < N_THREAD_GPU){
for(int jj=0; jj<jrem; jj++){
if(mywalk == walk_longer)
accp = dev_gravity(eps2, ipos, jpsh[0][jj], accp);
}
}else{
#pragma unroll
for(int jj=0; jj<N_THREAD_GPU; jj++){
if(mywalk == walk_longer)
accp = dev_gravity(eps2, ipos, jpsh[0][jj], accp);
}
}
}
return accp;
}
__device__ float4 ForceKernel_multiwalk(
const float3 ipos,
const int id_walk,
const int2 *ij_disp,
const EpjGPU *epj,
float4 accp,
const float eps2)
{
const int j_head = ij_disp[id_walk ].y;
const int j_tail = ij_disp[id_walk+1].y;
#if 1
for(int j=j_head; j<j_tail; j++){
float4 jposm = epj[j].posm;
accp = dev_gravity(eps2, ipos, jposm, accp);
}
#else
int njmin = j_tail - j_head;
njmin = min(njmin, __shfl_xor(njmin, 1));
njmin = min(njmin, __shfl_xor(njmin, 2));
njmin = min(njmin, __shfl_xor(njmin, 4));
njmin = min(njmin, __shfl_xor(njmin, 8));
njmin = min(njmin, __shfl_xor(njmin, 16));
njmin &= 3;;
for(int j=0; j<njmin; j+=4){
#pragma unroll 4
for(int jj=0; jj<4; jj++){
float4 jposm = epj[j_head + j + jj].posm;
float4 jposm = jpf[jj];
accp = dev_gravity(eps2, ipos, jposm, accp);
}
}
for(int j=j_head+njmin; j<j_tail; j++){
float4 jposm = epj[j].posm;
accp = dev_gravity(eps2, ipos, jposm, accp);
}
#endif
return accp;
}
__global__ void ForceKernel(
const int2 * ij_disp,
const EpiGPU * epi,
const EpjGPU * epj,
ForceGPU * force,
const float eps2)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
float3 ipos = epi[tid].pos;
int id_walk = epi[tid].id_walk;
float4 accp = make_float4(0.f, 0.f, 0.f, 0.f);
int t_head = blockDim.x * blockIdx.x;
int t_tail = t_head + N_THREAD_GPU - 1;
int nwalk_in_block = 1 + (epi[t_tail].id_walk - epi[t_head].id_walk);
__shared__ float4 jpsh[2][N_THREAD_GPU];
if(1 == nwalk_in_block){
accp = ForceKernel_1walk(jpsh[0], ipos, id_walk, ij_disp, epj, accp, eps2);
} else if(2 == nwalk_in_block){
// accp = ForceKernel_multiwalk(ipos, id_walk, ij_disp, epj, accp, eps2);
int iwalk0 = epi[t_head].id_walk;
int iwalk1 = epi[t_tail].id_walk;
accp = ForceKernel_2walk(jpsh, ipos, id_walk, iwalk0, iwalk1, ij_disp, epj, accp, eps2);
} else{
accp = ForceKernel_multiwalk(ipos, id_walk, ij_disp, epj, accp, eps2);
}
force[tid].accp = accp;
}
#endif
static cudaPointer<EpiGPU> dev_epi;
static cudaPointer<EpjGPU> dev_epj;
static cudaPointer<ForceGPU> dev_force;
static cudaPointer<int2> ij_disp;
static bool init_call = true;
PS::S32 DispatchKernelWithSP(
const PS::S32 tag,
const PS::S32 n_walk,
const FPGrav *epi[],
const PS::S32 n_epi[],
const FPGrav *epj[],
const PS::S32 n_epj[],
const PS::SPJMonopole *spj[],
const PS::S32 n_spj[]){
assert(n_walk <= N_WALK_LIMIT);
if(init_call){
dev_epi .allocate(NI_LIMIT);
dev_epj .allocate(NJ_LIMIT);
dev_force.allocate(NI_LIMIT);
ij_disp .allocate(N_WALK_LIMIT+2);
init_call = false;
}
const float eps2 = FPGrav::eps * FPGrav::eps;
ij_disp[0].x = 0;
ij_disp[0].y = 0;
for(int k=0; k<n_walk; k++){
ij_disp[k+1].x = ij_disp[k].x + n_epi[k];
ij_disp[k+1].y = ij_disp[k].y + (n_epj[k] + n_spj[k]);
}
ij_disp[n_walk+1] = ij_disp[n_walk];
assert(ij_disp[n_walk].x < NI_LIMIT);
assert(ij_disp[n_walk].y < NJ_LIMIT);
ij_disp.htod(n_walk + 2);
int ni_tot_reg = ij_disp[n_walk].x;
if(ni_tot_reg % N_THREAD_GPU){
ni_tot_reg /= N_THREAD_GPU;
ni_tot_reg++;
ni_tot_reg *= N_THREAD_GPU;
}
int ni_tot = 0;
int nj_tot = 0;
for(int iw=0; iw<n_walk; iw++){
for(int i=0; i<n_epi[iw]; i++){
dev_epi[ni_tot].pos.x = epi[iw][i].pos.x;
dev_epi[ni_tot].pos.y = epi[iw][i].pos.y;
dev_epi[ni_tot].pos.z = epi[iw][i].pos.z;
dev_epi[ni_tot].id_walk = iw;
ni_tot++;
}
for(int j=0; j<n_epj[iw]; j++){
dev_epj[nj_tot].posm.x = epj[iw][j].pos.x;
dev_epj[nj_tot].posm.y = epj[iw][j].pos.y;
dev_epj[nj_tot].posm.z = epj[iw][j].pos.z;
dev_epj[nj_tot].posm.w = epj[iw][j].mass;
nj_tot++;
}
for(int j=0; j<n_spj[iw]; j++){
dev_epj[nj_tot].posm.x = spj[iw][j].pos.x;
dev_epj[nj_tot].posm.y = spj[iw][j].pos.y;
dev_epj[nj_tot].posm.z = spj[iw][j].pos.z;
dev_epj[nj_tot].posm.w = spj[iw][j].getCharge();
nj_tot++;
}
}
for(int i=ni_tot; i<ni_tot_reg; i++){
dev_epi[i].id_walk = n_walk;
}
dev_epi.htod(ni_tot_reg);
dev_epj.htod(nj_tot);
int nblocks = ni_tot_reg / N_THREAD_GPU;
int nthreads = N_THREAD_GPU;
hipLaunchKernelGGL(( ForceKernel) , dim3(nblocks), dim3(nthreads), 0, 0, ij_disp, dev_epi, dev_epj, dev_force, eps2);
return 0;
}
PS::S32 RetrieveKernel(const PS::S32 tag,
const PS::S32 n_walk,
const PS::S32 ni[],
FPGrav *force[])
{
int ni_tot = 0;
for(int k=0; k<n_walk; k++){
ni_tot += ni[k];
}
dev_force.dtoh(ni_tot);
int n_cnt = 0;
for(int iw=0; iw<n_walk; iw++){
for(int i=0; i<ni[iw]; i++){
force[iw][i].acc.x = dev_force[n_cnt].accp.x;
force[iw][i].acc.y = dev_force[n_cnt].accp.y;
force[iw][i].acc.z = dev_force[n_cnt].accp.z;
force[iw][i].pot = dev_force[n_cnt].accp.w;
n_cnt++;
}
}
return 0;
}
| 106aeab2a21daef48f33c3586f6bb222d7f3d983.cu | //#include "class.hpp"
//#include "force.hpp"
#include<FDPS_particle_simulator.hpp>
#include "cuda_pointer.h"
#include "force_gpu_cuda.hpp"
enum{
N_THREAD_GPU = 32,
N_WALK_LIMIT = 1000,
NI_LIMIT = N_WALK_LIMIT*1000,
NJ_LIMIT = N_WALK_LIMIT*10000,
};
struct EpiGPU{
float3 pos;
int id_walk;
};
struct EpjGPU{
float4 posm;
};
struct ForceGPU{
float4 accp;
};
inline __device__ float4 dev_gravity(
float eps2,
float3 ipos,
float4 jposm,
float4 accp)
{
float dx = jposm.x - ipos.x;
float dy = jposm.y - ipos.y;
float dz = jposm.z - ipos.z;
float r2 = eps2 + dx*dx + dy*dy + dz*dz;
float rinv = rsqrtf(r2);
float pij = jposm.w * rinv;
float mri3 = rinv*rinv * pij;
accp.x += mri3 * dx;
accp.y += mri3 * dy;
accp.z += mri3 * dz;
accp.w -= pij;
return accp;
}
#if 0
__global__ void ForceKernel(
const int2 * ij_disp,
const EpiGPU * epi,
const EpjGPU * epj,
ForceGPU * force,
const float eps2)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
const float3 ipos = epi[tid].pos;
const int j_head = ij_disp[epi[tid].id_walk ].y;
const int j_tail = ij_disp[epi[tid].id_walk+1].y;
float4 accp = make_float4(0.f, 0.f, 0.f, 0.f);
for(int j=j_head; j<j_tail; j++){
float4 jposm = epj[j].posm;
accp = dev_gravity(eps2, ipos, jposm, accp);
}
force[tid].accp = accp;
}
#else
__device__ float4 ForceKernel_1walk(
float4 *jpsh,
const float3 ipos,
const int id_walk,
const int2 *ij_disp,
const EpjGPU *epj,
float4 accp,
const float eps2)
{
const int tid = threadIdx.x;
const int j_head = ij_disp[id_walk ].y;
const int j_tail = ij_disp[id_walk+1].y;
for(int j=j_head; j<j_tail; j+=N_THREAD_GPU){
// __syncthreads();
jpsh[tid] = ((float4 *)(epj + j)) [tid];
// __syncthreads();
if(j_tail-j < N_THREAD_GPU){
for(int jj=0; jj<j_tail-j; jj++){
accp = dev_gravity(eps2, ipos, jpsh[jj], accp);
}
}else{
#pragma unroll
for(int jj=0; jj<N_THREAD_GPU; jj++){
accp = dev_gravity(eps2, ipos, jpsh[jj], accp);
}
}
}
return accp;
}
__device__ float4 ForceKernel_2walk(
float4 jpsh[2][N_THREAD_GPU],
const float3 ipos,
const int id_walk,
const int iwalk0,
const int iwalk1,
const int2 *ij_disp,
const EpjGPU *epj,
float4 accp,
const float eps2)
{
const int jbeg0 = ij_disp[iwalk0].y;
const int jbeg1 = ij_disp[iwalk1].y;
const int jend0 = ij_disp[iwalk0 + 1].y;
const int jend1 = ij_disp[iwalk1 + 1].y;
const int nj0 = jend0 - jbeg0;
const int nj1 = jend1 - jbeg1;
const int nj_longer = nj0 > nj1 ? nj0 : nj1;
const int nj_shorter = nj0 > nj1 ? nj1 : nj0;
const int walk_longer= nj0 > nj1 ? 0 : 1;
const int jbeg_longer = nj0 > nj1 ? jbeg0 : jbeg1;
const int mywalk = id_walk==iwalk0 ? 0 : 1;
const int tid = threadIdx.x;
for(int j=0; j<nj_shorter; j+=N_THREAD_GPU){
jpsh[0][tid] = ((float4 *)(epj + jbeg0 + j)) [tid];
jpsh[1][tid] = ((float4 *)(epj + jbeg1 + j)) [tid];
if(nj_shorter-j < N_THREAD_GPU){
for(int jj=0; jj<nj_shorter-j; jj++){
accp = dev_gravity(eps2, ipos, jpsh[mywalk][jj], accp);
}
}else{
#pragma unroll
for(int jj=0; jj<N_THREAD_GPU; jj++){
accp = dev_gravity(eps2, ipos, jpsh[mywalk][jj], accp);
}
}
}
for(int j=nj_shorter; j<nj_longer; j+=N_THREAD_GPU){
jpsh[0][tid] = ((float4 *)(epj + jbeg_longer + j)) [tid];
int jrem = nj_longer - j;
if(jrem < N_THREAD_GPU){
for(int jj=0; jj<jrem; jj++){
if(mywalk == walk_longer)
accp = dev_gravity(eps2, ipos, jpsh[0][jj], accp);
}
}else{
#pragma unroll
for(int jj=0; jj<N_THREAD_GPU; jj++){
if(mywalk == walk_longer)
accp = dev_gravity(eps2, ipos, jpsh[0][jj], accp);
}
}
}
return accp;
}
__device__ float4 ForceKernel_multiwalk(
const float3 ipos,
const int id_walk,
const int2 *ij_disp,
const EpjGPU *epj,
float4 accp,
const float eps2)
{
const int j_head = ij_disp[id_walk ].y;
const int j_tail = ij_disp[id_walk+1].y;
#if 1
for(int j=j_head; j<j_tail; j++){
float4 jposm = epj[j].posm;
accp = dev_gravity(eps2, ipos, jposm, accp);
}
#else
int njmin = j_tail - j_head;
njmin = min(njmin, __shfl_xor(njmin, 1));
njmin = min(njmin, __shfl_xor(njmin, 2));
njmin = min(njmin, __shfl_xor(njmin, 4));
njmin = min(njmin, __shfl_xor(njmin, 8));
njmin = min(njmin, __shfl_xor(njmin, 16));
njmin &= 3;;
for(int j=0; j<njmin; j+=4){
#pragma unroll 4
for(int jj=0; jj<4; jj++){
float4 jposm = epj[j_head + j + jj].posm;
float4 jposm = jpf[jj];
accp = dev_gravity(eps2, ipos, jposm, accp);
}
}
for(int j=j_head+njmin; j<j_tail; j++){
float4 jposm = epj[j].posm;
accp = dev_gravity(eps2, ipos, jposm, accp);
}
#endif
return accp;
}
__global__ void ForceKernel(
const int2 * ij_disp,
const EpiGPU * epi,
const EpjGPU * epj,
ForceGPU * force,
const float eps2)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
float3 ipos = epi[tid].pos;
int id_walk = epi[tid].id_walk;
float4 accp = make_float4(0.f, 0.f, 0.f, 0.f);
int t_head = blockDim.x * blockIdx.x;
int t_tail = t_head + N_THREAD_GPU - 1;
int nwalk_in_block = 1 + (epi[t_tail].id_walk - epi[t_head].id_walk);
__shared__ float4 jpsh[2][N_THREAD_GPU];
if(1 == nwalk_in_block){
accp = ForceKernel_1walk(jpsh[0], ipos, id_walk, ij_disp, epj, accp, eps2);
} else if(2 == nwalk_in_block){
// accp = ForceKernel_multiwalk(ipos, id_walk, ij_disp, epj, accp, eps2);
int iwalk0 = epi[t_head].id_walk;
int iwalk1 = epi[t_tail].id_walk;
accp = ForceKernel_2walk(jpsh, ipos, id_walk, iwalk0, iwalk1, ij_disp, epj, accp, eps2);
} else{
accp = ForceKernel_multiwalk(ipos, id_walk, ij_disp, epj, accp, eps2);
}
force[tid].accp = accp;
}
#endif
static cudaPointer<EpiGPU> dev_epi;
static cudaPointer<EpjGPU> dev_epj;
static cudaPointer<ForceGPU> dev_force;
static cudaPointer<int2> ij_disp;
static bool init_call = true;
PS::S32 DispatchKernelWithSP(
const PS::S32 tag,
const PS::S32 n_walk,
const FPGrav *epi[],
const PS::S32 n_epi[],
const FPGrav *epj[],
const PS::S32 n_epj[],
const PS::SPJMonopole *spj[],
const PS::S32 n_spj[]){
assert(n_walk <= N_WALK_LIMIT);
if(init_call){
dev_epi .allocate(NI_LIMIT);
dev_epj .allocate(NJ_LIMIT);
dev_force.allocate(NI_LIMIT);
ij_disp .allocate(N_WALK_LIMIT+2);
init_call = false;
}
const float eps2 = FPGrav::eps * FPGrav::eps;
ij_disp[0].x = 0;
ij_disp[0].y = 0;
for(int k=0; k<n_walk; k++){
ij_disp[k+1].x = ij_disp[k].x + n_epi[k];
ij_disp[k+1].y = ij_disp[k].y + (n_epj[k] + n_spj[k]);
}
ij_disp[n_walk+1] = ij_disp[n_walk];
assert(ij_disp[n_walk].x < NI_LIMIT);
assert(ij_disp[n_walk].y < NJ_LIMIT);
ij_disp.htod(n_walk + 2);
int ni_tot_reg = ij_disp[n_walk].x;
if(ni_tot_reg % N_THREAD_GPU){
ni_tot_reg /= N_THREAD_GPU;
ni_tot_reg++;
ni_tot_reg *= N_THREAD_GPU;
}
int ni_tot = 0;
int nj_tot = 0;
for(int iw=0; iw<n_walk; iw++){
for(int i=0; i<n_epi[iw]; i++){
dev_epi[ni_tot].pos.x = epi[iw][i].pos.x;
dev_epi[ni_tot].pos.y = epi[iw][i].pos.y;
dev_epi[ni_tot].pos.z = epi[iw][i].pos.z;
dev_epi[ni_tot].id_walk = iw;
ni_tot++;
}
for(int j=0; j<n_epj[iw]; j++){
dev_epj[nj_tot].posm.x = epj[iw][j].pos.x;
dev_epj[nj_tot].posm.y = epj[iw][j].pos.y;
dev_epj[nj_tot].posm.z = epj[iw][j].pos.z;
dev_epj[nj_tot].posm.w = epj[iw][j].mass;
nj_tot++;
}
for(int j=0; j<n_spj[iw]; j++){
dev_epj[nj_tot].posm.x = spj[iw][j].pos.x;
dev_epj[nj_tot].posm.y = spj[iw][j].pos.y;
dev_epj[nj_tot].posm.z = spj[iw][j].pos.z;
dev_epj[nj_tot].posm.w = spj[iw][j].getCharge();
nj_tot++;
}
}
for(int i=ni_tot; i<ni_tot_reg; i++){
dev_epi[i].id_walk = n_walk;
}
dev_epi.htod(ni_tot_reg);
dev_epj.htod(nj_tot);
int nblocks = ni_tot_reg / N_THREAD_GPU;
int nthreads = N_THREAD_GPU;
ForceKernel <<<nblocks, nthreads>>> (ij_disp, dev_epi, dev_epj, dev_force, eps2);
return 0;
}
PS::S32 RetrieveKernel(const PS::S32 tag,
const PS::S32 n_walk,
const PS::S32 ni[],
FPGrav *force[])
{
int ni_tot = 0;
for(int k=0; k<n_walk; k++){
ni_tot += ni[k];
}
dev_force.dtoh(ni_tot);
int n_cnt = 0;
for(int iw=0; iw<n_walk; iw++){
for(int i=0; i<ni[iw]; i++){
force[iw][i].acc.x = dev_force[n_cnt].accp.x;
force[iw][i].acc.y = dev_force[n_cnt].accp.y;
force[iw][i].acc.z = dev_force[n_cnt].accp.z;
force[iw][i].pot = dev_force[n_cnt].accp.w;
n_cnt++;
}
}
return 0;
}
|
96f4815d6f5b55b1208d302f448a3b60b6dcc5f0.hip | // !!! This is a file automatically generated by hipify!!!
#include "cuNSearchDeviceData.h"
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#include <thrust/gather.h>
#include <thrust/sequence.h>
#include <thrust/execution_policy.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/fill.h>
#ifdef DEBUG
#define PRINT_STATS true
#define USE_TIMING(x) x;
#else
#define PRINT_STATS false
#define USE_TIMING(x)
#endif
#include "Timing.h"
#define PRINT_STATS true
#define USE_TIMING(x) x;
#include "PointSetImplementation.h"
#include "GridInfo.h"
#include "cuda_helper.h"
#include "cuNSearchKernels.cuh"
namespace cuNSearch
{
void cuNSearchDeviceData::computeMinMax(PointSet &pointSet)
{
if (pointSet.n_points() == 0)
return;
auto pointSetImpl = pointSet.impl.get();
Int3 data[2];
data[0] = Int3(std::numeric_limits<int>().max(), std::numeric_limits<int>().max(), std::numeric_limits<int>().max());
data[1] = Int3(std::numeric_limits<int>().min(), std::numeric_limits<int>().min(), std::numeric_limits<int>().min());
d_MinMax.resize(2);
CudaHelper::MemcpyHostToDevice(data, CudaHelper::GetPointer(d_MinMax), 2);
kComputeMinMax << <pointSetImpl->BlockStartsForParticles, pointSetImpl->ThreadsPerBlock >> > (
(Real3*)CudaHelper::GetPointer(pointSetImpl->d_Particles),
static_cast<unsigned int>(pointSet.n_points()),
m_SearchRadius,
CudaHelper::GetPointer(d_MinMax),
CudaHelper::GetPointer(d_MinMax) + 1
);
CudaHelper::CheckLastError();
CudaHelper::DeviceSynchronize();
CudaHelper::MemcpyDeviceToHost(CudaHelper::GetPointer(d_MinMax), data, 2);
Int3 minCell = data[0];
Int3 maxCell = data[1];
pointSetImpl->Min.x = minCell.x * m_SearchRadius;
pointSetImpl->Min.y = minCell.y * m_SearchRadius;
pointSetImpl->Min.z = minCell.z * m_SearchRadius;
pointSetImpl->Max.x = maxCell.x * m_SearchRadius;
pointSetImpl->Max.y = maxCell.y * m_SearchRadius;
pointSetImpl->Max.z = maxCell.z * m_SearchRadius;
//CPU implementation of min max computation
//Real3 cpuMin, cpuMax;
//cpuMin = make_Real3(std::numeric_limits<Real>().max());
//cpuMax = make_Real3(std::numeric_limits<Real>().min());
//Real3 *points = (Real3 *)pointSet.m_x;
//for (size_t i = 0; i < pointSet.n_points(); i++)
//{
// cpuMin.x = ::min(cpuMin.x, points[i].x);
// cpuMin.y = ::min(cpuMin.y, points[i].y);
// cpuMin.z = ::min(cpuMin.z, points[i].z);
// cpuMax.x = ::max(cpuMax.x, points[i].x);
// cpuMax.y = ::max(cpuMax.y, points[i].y);
// cpuMax.z = ::max(cpuMax.z, points[i].z);
//}
}
void cuNSearchDeviceData::computeCellInformation(PointSet &pointSet)
{
if (pointSet.n_points() == 0)
return;
auto pointSetImpl = pointSet.impl.get();
Real3 sceneMin = pointSetImpl->Min;
Real3 sceneMax = pointSetImpl->Max;
GridInfo gridInfo;
gridInfo.ParticleCount = static_cast<uint>(pointSet.n_points());
gridInfo.SquaredSearchRadius = m_SearchRadius * m_SearchRadius;
gridInfo.GridMin = sceneMin;
Real cellSize = m_SearchRadius;
Real3 gridSize = sceneMax - sceneMin;
gridInfo.GridDimension.x = static_cast<unsigned int>(ceil(gridSize.x / cellSize));
gridInfo.GridDimension.y = static_cast<unsigned int>(ceil(gridSize.y / cellSize));
gridInfo.GridDimension.z = static_cast<unsigned int>(ceil(gridSize.z / cellSize));
//Increase grid by 2 cells in each direction (+4 in each dimension) to skip bounds checks in the kernel
gridInfo.GridDimension.x += 4;
gridInfo.GridDimension.y += 4;
gridInfo.GridDimension.z += 4;
gridInfo.GridMin -= Real3(cellSize, cellSize, cellSize) * (Real)2;
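// Illustrative example: a domain spanning 10x10x10 cells is stored as a 14x14x14 grid
// whose origin is shifted down by 2*cellSize, so the 3x3x3 neighbourhood of any cell
// that can actually hold a particle never indexes outside the grid.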
//One meta grid cell contains 8x8x8 grid cells. (512)
gridInfo.MetaGridDimension.x = static_cast<unsigned int>(ceil(gridInfo.GridDimension.x / (float)CUDA_META_GRID_GROUP_SIZE));
gridInfo.MetaGridDimension.y = static_cast<unsigned int>(ceil(gridInfo.GridDimension.y / (float)CUDA_META_GRID_GROUP_SIZE));
gridInfo.MetaGridDimension.z = static_cast<unsigned int>(ceil(gridInfo.GridDimension.z / (float)CUDA_META_GRID_GROUP_SIZE));
// Adjust grid size to multiple of cell size
gridSize.x = gridInfo.GridDimension.x * cellSize;
gridSize.y = gridInfo.GridDimension.y * cellSize;
gridSize.z = gridInfo.GridDimension.z * cellSize;
gridInfo.GridDelta.x = gridInfo.GridDimension.x / gridSize.x;
gridInfo.GridDelta.y = gridInfo.GridDimension.y / gridSize.y;
gridInfo.GridDelta.z = gridInfo.GridDimension.z / gridSize.z;
d_TempSortIndices.resize(gridInfo.ParticleCount);
uint numberOfCells = (gridInfo.MetaGridDimension.x * gridInfo.MetaGridDimension.y * gridInfo.MetaGridDimension.z) * CUDA_META_GRID_BLOCK_SIZE;
pointSet.impl->prepareInternalDataStructures(gridInfo, numberOfCells);
CudaHelper::CheckLastError();
CudaHelper::DeviceSynchronize();
hipMemset(CudaHelper::GetPointer(pointSetImpl->d_CellParticleCounts), 0, CudaHelper::GetSizeInBytes(pointSetImpl->d_CellParticleCounts));
CudaHelper::CheckLastError();
CudaHelper::DeviceSynchronize();
kInsertParticles_Morton << <pointSetImpl->BlockStartsForParticles, pointSetImpl->ThreadsPerBlock >> > (
gridInfo,
(Real3*)CudaHelper::GetPointer(pointSetImpl->d_Particles),
CudaHelper::GetPointer(pointSetImpl->d_ParticleCellIndices),
CudaHelper::GetPointer(pointSetImpl->d_CellParticleCounts),
CudaHelper::GetPointer(d_TempSortIndices)
);
CudaHelper::CheckLastError();
CudaHelper::DeviceSynchronize();
thrust::exclusive_scan(
pointSetImpl->d_CellParticleCounts.begin(),
pointSetImpl->d_CellParticleCounts.end(),
pointSetImpl->d_CellOffsets.begin());
CudaHelper::DeviceSynchronize();
kCountingSortIndices << <pointSetImpl->BlockStartsForParticles, pointSetImpl->ThreadsPerBlock >> > (
gridInfo,
CudaHelper::GetPointer(pointSetImpl->d_ParticleCellIndices),
CudaHelper::GetPointer(pointSetImpl->d_CellOffsets),
CudaHelper::GetPointer(d_TempSortIndices),
CudaHelper::GetPointer(pointSetImpl->d_SortIndices),
CudaHelper::GetPointer(pointSetImpl->d_posInSortedPoints)
);
CudaHelper::DeviceSynchronize();
auto &tempSequence = d_TempSortIndices;
thrust::sequence(tempSequence.begin(), tempSequence.end());
thrust::gather(
pointSetImpl->d_SortIndices.begin(),
pointSetImpl->d_SortIndices.end(),
tempSequence.begin(),
pointSetImpl->d_ReversedSortIndices.begin());
// YZ: use this if the points are actually sorted and reordered in memory
//thrust::sort_by_key(pointSetImpl->d_posInSortedPoints.begin(), pointSetImpl->d_posInSortedPoints.end(), pointSetImpl->d_Particles.begin());
CudaHelper::CheckLastError();
CudaHelper::DeviceSynchronize();
pointSet.sortIndices.resize(pointSetImpl->d_SortIndices.size());
CudaHelper::MemcpyDeviceToHost(CudaHelper::GetPointer(pointSetImpl->d_SortIndices), pointSet.sortIndices.data(), pointSetImpl->d_SortIndices.size());
// YZ: this is just to confirm that d_ReversedSortIndices is exactly the same as d_SortIndices, and so the gather is redundant.
//thrust::host_vector<uint> temp_d_SortIndices(pointSet.sortIndices.size());
//thrust::copy(pointSetImpl->d_ReversedSortIndices.begin(), pointSetImpl->d_ReversedSortIndices.end(), temp_d_SortIndices.begin());
//for (unsigned int i = 0; i < pointSet.sortIndices.size(); i++) {
// fprintf(stdout, "%u, %u\n", pointSet.sortIndices[i], temp_d_SortIndices[i]);
//}
}
void cuNSearchDeviceData::computeNeighborhood(PointSet &queryPointSet, PointSet &pointSet, uint neighborListEntry)
{
if (queryPointSet.n_points() == 0)
return;
auto queryPointSetImpl = queryPointSet.impl.get();
auto pointSetImpl = pointSet.impl.get();
uint particleCount = static_cast<uint>(queryPointSet.n_points());
USE_TIMING(Timing::startTiming("Execute kNeighborCount"));
d_NeighborCounts.resize(particleCount);
kComputeCounts << <queryPointSetImpl->BlockStartsForParticles, queryPointSetImpl->ThreadsPerBlock >> > (
(Real3*)CudaHelper::GetPointer(queryPointSetImpl->d_Particles),
static_cast<unsigned int>(queryPointSet.n_points()),
pointSetImpl->gridInfo,
(Real3*)CudaHelper::GetPointer(pointSetImpl->d_Particles),
CudaHelper::GetPointer(pointSetImpl->d_CellOffsets),
CudaHelper::GetPointer(pointSetImpl->d_CellParticleCounts),
CudaHelper::GetPointer(d_NeighborCounts),
CudaHelper::GetPointer(pointSetImpl->d_ReversedSortIndices)
);
CudaHelper::CheckLastError();
CudaHelper::DeviceSynchronize();
USE_TIMING(Timing::stopTiming(PRINT_STATS));
USE_TIMING(Timing::startTiming("Execute exclusive_scan over counts"));
d_NeighborWriteOffsets.resize(particleCount);
//Prefix sum over neighbor counts
thrust::exclusive_scan(
d_NeighborCounts.begin(),
d_NeighborCounts.end(),
d_NeighborWriteOffsets.begin());
CudaHelper::DeviceSynchronize();
//Compute the total number of neighbors
uint lastOffset = 0;
CudaHelper::MemcpyDeviceToHost(CudaHelper::GetPointer(d_NeighborWriteOffsets) + particleCount - 1, &lastOffset, 1);
uint lastParticleNeighborCount = 0;
CudaHelper::MemcpyDeviceToHost(CudaHelper::GetPointer(d_NeighborCounts) + particleCount - 1, &lastParticleNeighborCount, 1);
uint totalNeighborCount = lastOffset + lastParticleNeighborCount;
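// Worked example: counts = {2, 0, 3} scans to offsets = {0, 2, 2};
// the total is the last offset plus the last count, 2 + 3 = 5 neighbor entries.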
d_Neighbors.resize(totalNeighborCount);
CudaHelper::DeviceSynchronize();
USE_TIMING(Timing::stopTiming(PRINT_STATS));
USE_TIMING(Timing::startTiming("Execute kNeighborhoodQueryWithCounts"));
kNeighborhoodQueryWithCounts << <queryPointSetImpl->BlockStartsForParticles, queryPointSetImpl->ThreadsPerBlock >> > (
(Real3*)CudaHelper::GetPointer(queryPointSetImpl->d_Particles),
static_cast<unsigned int>(queryPointSet.n_points()),
pointSetImpl->gridInfo,
(Real3*)CudaHelper::GetPointer(pointSetImpl->d_Particles),
CudaHelper::GetPointer(pointSetImpl->d_CellOffsets),
CudaHelper::GetPointer(pointSetImpl->d_CellParticleCounts),
CudaHelper::GetPointer(d_NeighborWriteOffsets),
CudaHelper::GetPointer(d_Neighbors),
CudaHelper::GetPointer(pointSetImpl->d_ReversedSortIndices)
);
CudaHelper::CheckLastError();
CudaHelper::DeviceSynchronize();
USE_TIMING(Timing::stopTiming(PRINT_STATS));
//Copy data to host
USE_TIMING(Timing::startTiming("Neighbor copy from device to host - resize"));
auto &neighborSet = queryPointSet.neighbors[neighborListEntry];
// YZ: this check will fail from the 2nd run onwards
if (neighborSet.NeighborCountAllocationSize < totalNeighborCount)
{
if (neighborSet.NeighborCountAllocationSize != 0)
{
hipHostFree(neighborSet.Neighbors);
}
neighborSet.NeighborCountAllocationSize = static_cast<unsigned int>(totalNeighborCount * 1.5);
hipHostMalloc(&neighborSet.Neighbors, sizeof(uint) * neighborSet.NeighborCountAllocationSize);
}
if (neighborSet.ParticleCountAllocationSize < particleCount)
{
if (neighborSet.ParticleCountAllocationSize != 0)
{
hipHostFree(neighborSet.Offsets);
hipHostFree(neighborSet.Counts);
}
neighborSet.ParticleCountAllocationSize = static_cast<unsigned int>(particleCount * 1.5);
hipHostMalloc(&neighborSet.Offsets, sizeof(uint) * neighborSet.ParticleCountAllocationSize);
hipHostMalloc(&neighborSet.Counts, sizeof(uint) * neighborSet.ParticleCountAllocationSize);
}
USE_TIMING(Timing::stopTiming(PRINT_STATS));
USE_TIMING(Timing::startTiming("Neighbor copy from device to host - MemcpyDeviceToHost"));
if (PRINT_STATS)
{
unsigned long bytesToCopy = totalNeighborCount * 4 + particleCount * 2 * 4;
printf("Total neighbors: %d \n", totalNeighborCount);
printf("Average neighbors: %d \n", totalNeighborCount / particleCount);
printf("Expected amount: %f MB \n", bytesToCopy / (1024.0f * 1024.0f));
}
CudaHelper::MemcpyDeviceToHost(CudaHelper::GetPointer(d_Neighbors), neighborSet.Neighbors, totalNeighborCount);
CudaHelper::MemcpyDeviceToHost(CudaHelper::GetPointer(d_NeighborCounts), neighborSet.Counts, particleCount);
CudaHelper::MemcpyDeviceToHost(CudaHelper::GetPointer(d_NeighborWriteOffsets), neighborSet.Offsets, particleCount);
USE_TIMING(Timing::stopTiming(PRINT_STATS));
}
}
| 96f4815d6f5b55b1208d302f448a3b60b6dcc5f0.cu | #include "cuNSearchDeviceData.h"
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#include <thrust/gather.h>
#include <thrust/sequence.h>
#include <thrust/execution_policy.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/fill.h>
#ifdef DEBUG
#define PRINT_STATS true
#define USE_TIMING(x) x;
#else
#define PRINT_STATS false
#define USE_TIMING(x)
#endif
#include "Timing.h"
#define PRINT_STATS true
#define USE_TIMING(x) x;
#include "PointSetImplementation.h"
#include "GridInfo.h"
#include "cuda_helper.h"
#include "cuNSearchKernels.cuh"
namespace cuNSearch
{
void cuNSearchDeviceData::computeMinMax(PointSet &pointSet)
{
if (pointSet.n_points() == 0)
return;
auto pointSetImpl = pointSet.impl.get();
Int3 data[2];
data[0] = Int3(std::numeric_limits<int>().max(), std::numeric_limits<int>().max(), std::numeric_limits<int>().max());
data[1] = Int3(std::numeric_limits<int>().min(), std::numeric_limits<int>().min(), std::numeric_limits<int>().min());
d_MinMax.resize(2);
CudaHelper::MemcpyHostToDevice(data, CudaHelper::GetPointer(d_MinMax), 2);
kComputeMinMax << <pointSetImpl->BlockStartsForParticles, pointSetImpl->ThreadsPerBlock >> > (
(Real3*)CudaHelper::GetPointer(pointSetImpl->d_Particles),
static_cast<unsigned int>(pointSet.n_points()),
m_SearchRadius,
CudaHelper::GetPointer(d_MinMax),
CudaHelper::GetPointer(d_MinMax) + 1
);
CudaHelper::CheckLastError();
CudaHelper::DeviceSynchronize();
CudaHelper::MemcpyDeviceToHost(CudaHelper::GetPointer(d_MinMax), data, 2);
Int3 minCell = data[0];
Int3 maxCell = data[1];
pointSetImpl->Min.x = minCell.x * m_SearchRadius;
pointSetImpl->Min.y = minCell.y * m_SearchRadius;
pointSetImpl->Min.z = minCell.z * m_SearchRadius;
pointSetImpl->Max.x = maxCell.x * m_SearchRadius;
pointSetImpl->Max.y = maxCell.y * m_SearchRadius;
pointSetImpl->Max.z = maxCell.z * m_SearchRadius;
//CPU implementation of min max computation
//Real3 cpuMin, cpuMax;
//cpuMin = make_Real3(std::numeric_limits<Real>().max());
//cpuMax = make_Real3(std::numeric_limits<Real>().min());
//Real3 *points = (Real3 *)pointSet.m_x;
//for (size_t i = 0; i < pointSet.n_points(); i++)
//{
// cpuMin.x = std::min(cpuMin.x, points[i].x);
// cpuMin.y = std::min(cpuMin.y, points[i].y);
// cpuMin.z = std::min(cpuMin.z, points[i].z);
// cpuMax.x = std::max(cpuMax.x, points[i].x);
// cpuMax.y = std::max(cpuMax.y, points[i].y);
// cpuMax.z = std::max(cpuMax.z, points[i].z);
//}
}
void cuNSearchDeviceData::computeCellInformation(PointSet &pointSet)
{
if (pointSet.n_points() == 0)
return;
auto pointSetImpl = pointSet.impl.get();
Real3 sceneMin = pointSetImpl->Min;
Real3 sceneMax = pointSetImpl->Max;
GridInfo gridInfo;
gridInfo.ParticleCount = static_cast<uint>(pointSet.n_points());
gridInfo.SquaredSearchRadius = m_SearchRadius * m_SearchRadius;
gridInfo.GridMin = sceneMin;
Real cellSize = m_SearchRadius;
Real3 gridSize = sceneMax - sceneMin;
gridInfo.GridDimension.x = static_cast<unsigned int>(ceil(gridSize.x / cellSize));
gridInfo.GridDimension.y = static_cast<unsigned int>(ceil(gridSize.y / cellSize));
gridInfo.GridDimension.z = static_cast<unsigned int>(ceil(gridSize.z / cellSize));
//Increase grid by 2 cells in each direction (+4 in each dimension) to skip bounds checks in the kernel
gridInfo.GridDimension.x += 4;
gridInfo.GridDimension.y += 4;
gridInfo.GridDimension.z += 4;
gridInfo.GridMin -= Real3(cellSize, cellSize, cellSize) * (Real)2;
//One meta grid cell contains 8x8x8 grid cells. (512)
gridInfo.MetaGridDimension.x = static_cast<unsigned int>(ceil(gridInfo.GridDimension.x / (float)CUDA_META_GRID_GROUP_SIZE));
gridInfo.MetaGridDimension.y = static_cast<unsigned int>(ceil(gridInfo.GridDimension.y / (float)CUDA_META_GRID_GROUP_SIZE));
gridInfo.MetaGridDimension.z = static_cast<unsigned int>(ceil(gridInfo.GridDimension.z / (float)CUDA_META_GRID_GROUP_SIZE));
// Adjust grid size to multiple of cell size
gridSize.x = gridInfo.GridDimension.x * cellSize;
gridSize.y = gridInfo.GridDimension.y * cellSize;
gridSize.z = gridInfo.GridDimension.z * cellSize;
gridInfo.GridDelta.x = gridInfo.GridDimension.x / gridSize.x;
gridInfo.GridDelta.y = gridInfo.GridDimension.y / gridSize.y;
gridInfo.GridDelta.z = gridInfo.GridDimension.z / gridSize.z;
d_TempSortIndices.resize(gridInfo.ParticleCount);
uint numberOfCells = (gridInfo.MetaGridDimension.x * gridInfo.MetaGridDimension.y * gridInfo.MetaGridDimension.z) * CUDA_META_GRID_BLOCK_SIZE;
pointSet.impl->prepareInternalDataStructures(gridInfo, numberOfCells);
CudaHelper::CheckLastError();
CudaHelper::DeviceSynchronize();
cudaMemset(CudaHelper::GetPointer(pointSetImpl->d_CellParticleCounts), 0, CudaHelper::GetSizeInBytes(pointSetImpl->d_CellParticleCounts));
CudaHelper::CheckLastError();
CudaHelper::DeviceSynchronize();
kInsertParticles_Morton << <pointSetImpl->BlockStartsForParticles, pointSetImpl->ThreadsPerBlock >> > (
gridInfo,
(Real3*)CudaHelper::GetPointer(pointSetImpl->d_Particles),
CudaHelper::GetPointer(pointSetImpl->d_ParticleCellIndices),
CudaHelper::GetPointer(pointSetImpl->d_CellParticleCounts),
CudaHelper::GetPointer(d_TempSortIndices)
);
CudaHelper::CheckLastError();
CudaHelper::DeviceSynchronize();
thrust::exclusive_scan(
pointSetImpl->d_CellParticleCounts.begin(),
pointSetImpl->d_CellParticleCounts.end(),
pointSetImpl->d_CellOffsets.begin());
CudaHelper::DeviceSynchronize();
kCountingSortIndices << <pointSetImpl->BlockStartsForParticles, pointSetImpl->ThreadsPerBlock >> > (
gridInfo,
CudaHelper::GetPointer(pointSetImpl->d_ParticleCellIndices),
CudaHelper::GetPointer(pointSetImpl->d_CellOffsets),
CudaHelper::GetPointer(d_TempSortIndices),
CudaHelper::GetPointer(pointSetImpl->d_SortIndices),
CudaHelper::GetPointer(pointSetImpl->d_posInSortedPoints)
);
CudaHelper::DeviceSynchronize();
auto &tempSequence = d_TempSortIndices;
thrust::sequence(tempSequence.begin(), tempSequence.end());
thrust::gather(
pointSetImpl->d_SortIndices.begin(),
pointSetImpl->d_SortIndices.end(),
tempSequence.begin(),
pointSetImpl->d_ReversedSortIndices.begin());
// YZ: use this if the points are actually sorted and reordered in memory
//thrust::sort_by_key(pointSetImpl->d_posInSortedPoints.begin(), pointSetImpl->d_posInSortedPoints.end(), pointSetImpl->d_Particles.begin());
CudaHelper::CheckLastError();
CudaHelper::DeviceSynchronize();
pointSet.sortIndices.resize(pointSetImpl->d_SortIndices.size());
CudaHelper::MemcpyDeviceToHost(CudaHelper::GetPointer(pointSetImpl->d_SortIndices), pointSet.sortIndices.data(), pointSetImpl->d_SortIndices.size());
// YZ: this is just to confirm that d_ReversedSortIndices is exactly the same as d_SortIndices, and so the gather is redundant.
//thrust::host_vector<uint> temp_d_SortIndices(pointSet.sortIndices.size());
//thrust::copy(pointSetImpl->d_ReversedSortIndices.begin(), pointSetImpl->d_ReversedSortIndices.end(), temp_d_SortIndices.begin());
//for (unsigned int i = 0; i < pointSet.sortIndices.size(); i++) {
// fprintf(stdout, "%u, %u\n", pointSet.sortIndices[i], temp_d_SortIndices[i]);
//}
}
void cuNSearchDeviceData::computeNeighborhood(PointSet &queryPointSet, PointSet &pointSet, uint neighborListEntry)
{
if (queryPointSet.n_points() == 0)
return;
auto queryPointSetImpl = queryPointSet.impl.get();
auto pointSetImpl = pointSet.impl.get();
uint particleCount = static_cast<uint>(queryPointSet.n_points());
USE_TIMING(Timing::startTiming("Execute kNeighborCount"));
d_NeighborCounts.resize(particleCount);
kComputeCounts << <queryPointSetImpl->BlockStartsForParticles, queryPointSetImpl->ThreadsPerBlock >> > (
(Real3*)CudaHelper::GetPointer(queryPointSetImpl->d_Particles),
static_cast<unsigned int>(queryPointSet.n_points()),
pointSetImpl->gridInfo,
(Real3*)CudaHelper::GetPointer(pointSetImpl->d_Particles),
CudaHelper::GetPointer(pointSetImpl->d_CellOffsets),
CudaHelper::GetPointer(pointSetImpl->d_CellParticleCounts),
CudaHelper::GetPointer(d_NeighborCounts),
CudaHelper::GetPointer(pointSetImpl->d_ReversedSortIndices)
);
CudaHelper::CheckLastError();
CudaHelper::DeviceSynchronize();
USE_TIMING(Timing::stopTiming(PRINT_STATS));
USE_TIMING(Timing::startTiming("Execute exclusive_scan over counts"));
d_NeighborWriteOffsets.resize(particleCount);
//Prefix sum over neighbor counts
thrust::exclusive_scan(
d_NeighborCounts.begin(),
d_NeighborCounts.end(),
d_NeighborWriteOffsets.begin());
CudaHelper::DeviceSynchronize();
//Compute the total number of neighbors
uint lastOffset = 0;
CudaHelper::MemcpyDeviceToHost(CudaHelper::GetPointer(d_NeighborWriteOffsets) + particleCount - 1, &lastOffset, 1);
uint lastParticleNeighborCount = 0;
CudaHelper::MemcpyDeviceToHost(CudaHelper::GetPointer(d_NeighborCounts) + particleCount - 1, &lastParticleNeighborCount, 1);
uint totalNeighborCount = lastOffset + lastParticleNeighborCount;
d_Neighbors.resize(totalNeighborCount);
CudaHelper::DeviceSynchronize();
USE_TIMING(Timing::stopTiming(PRINT_STATS));
USE_TIMING(Timing::startTiming("Execute kNeighborhoodQueryWithCounts"));
kNeighborhoodQueryWithCounts << <queryPointSetImpl->BlockStartsForParticles, queryPointSetImpl->ThreadsPerBlock >> > (
(Real3*)CudaHelper::GetPointer(queryPointSetImpl->d_Particles),
static_cast<unsigned int>(queryPointSet.n_points()),
pointSetImpl->gridInfo,
(Real3*)CudaHelper::GetPointer(pointSetImpl->d_Particles),
CudaHelper::GetPointer(pointSetImpl->d_CellOffsets),
CudaHelper::GetPointer(pointSetImpl->d_CellParticleCounts),
CudaHelper::GetPointer(d_NeighborWriteOffsets),
CudaHelper::GetPointer(d_Neighbors),
CudaHelper::GetPointer(pointSetImpl->d_ReversedSortIndices)
);
CudaHelper::CheckLastError();
CudaHelper::DeviceSynchronize();
USE_TIMING(Timing::stopTiming(PRINT_STATS));
//Copy data to host
USE_TIMING(Timing::startTiming("Neighbor copy from device to host - resize"));
auto &neighborSet = queryPointSet.neighbors[neighborListEntry];
// YZ: this check will fail from the 2nd run onwards
if (neighborSet.NeighborCountAllocationSize < totalNeighborCount)
{
if (neighborSet.NeighborCountAllocationSize != 0)
{
cudaFreeHost(neighborSet.Neighbors);
}
neighborSet.NeighborCountAllocationSize = static_cast<unsigned int>(totalNeighborCount * 1.5);
cudaMallocHost(&neighborSet.Neighbors, sizeof(uint) * neighborSet.NeighborCountAllocationSize);
}
if (neighborSet.ParticleCountAllocationSize < particleCount)
{
if (neighborSet.ParticleCountAllocationSize != 0)
{
cudaFreeHost(neighborSet.Offsets);
cudaFreeHost(neighborSet.Counts);
}
neighborSet.ParticleCountAllocationSize = static_cast<unsigned int>(particleCount * 1.5);
cudaMallocHost(&neighborSet.Offsets, sizeof(uint) * neighborSet.ParticleCountAllocationSize);
cudaMallocHost(&neighborSet.Counts, sizeof(uint) * neighborSet.ParticleCountAllocationSize);
}
USE_TIMING(Timing::stopTiming(PRINT_STATS));
USE_TIMING(Timing::startTiming("Neighbor copy from device to host - MemcpyDeviceToHost"));
if (PRINT_STATS)
{
unsigned long bytesToCopy = totalNeighborCount * 4 + particleCount * 2 * 4;
printf("Total neighbors: %d \n", totalNeighborCount);
printf("Average neighbors: %d \n", totalNeighborCount / particleCount);
printf("Expected amount: %f MB \n", bytesToCopy / (1024.0f * 1024.0f));
}
CudaHelper::MemcpyDeviceToHost(CudaHelper::GetPointer(d_Neighbors), neighborSet.Neighbors, totalNeighborCount);
CudaHelper::MemcpyDeviceToHost(CudaHelper::GetPointer(d_NeighborCounts), neighborSet.Counts, particleCount);
CudaHelper::MemcpyDeviceToHost(CudaHelper::GetPointer(d_NeighborWriteOffsets), neighborSet.Offsets, particleCount);
USE_TIMING(Timing::stopTiming(PRINT_STATS));
}
}
|
6cff8b3bf818004ea9c24663edf18c5489e07901.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2018-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include <linalg/add.cuh>
#include <random/rng.cuh>
#include "add_hip.cuh"
#include "test_utils.h"
namespace MLCommon {
namespace LinAlg {
template <typename InT, typename OutT = InT>
class AddTest : public ::testing::TestWithParam<AddInputs<InT, OutT>> {
protected:
void SetUp() override {
params = ::testing::TestWithParam<AddInputs<InT, OutT>>::GetParam();
Random::Rng r(params.seed);
int len = params.len;
CUDA_CHECK(hipStreamCreate(&stream));
allocate(in1, len);
allocate(in2, len);
allocate(out_ref, len);
allocate(out, len);
r.uniform(in1, len, InT(-1.0), InT(1.0), stream);
r.uniform(in2, len, InT(-1.0), InT(1.0), stream);
naiveAddElem<InT, OutT>(out_ref, in1, in2, len);
add<InT, OutT>(out, in1, in2, len, stream);
}
void TearDown() override {
CUDA_CHECK(hipStreamSynchronize(stream));
CUDA_CHECK(hipFree(in1));
CUDA_CHECK(hipFree(in2));
CUDA_CHECK(hipFree(out_ref));
CUDA_CHECK(hipFree(out));
CUDA_CHECK(hipStreamDestroy(stream));
}
void compare() {
ASSERT_TRUE(devArrMatch(out_ref, out, params.len,
CompareApprox<OutT>(params.tolerance)));
}
protected:
AddInputs<InT, OutT> params;
InT *in1, *in2;
OutT *out_ref, *out;
hipStream_t stream;
};
const std::vector<AddInputs<float>> inputsf = {
{0.000001f, 1024 * 1024, 1234ULL},
{0.000001f, 1024 * 1024 + 2, 1234ULL},
{0.000001f, 1024 * 1024 + 1, 1234ULL},
};
typedef AddTest<float> AddTestF;
TEST_P(AddTestF, Result) { compare(); }
INSTANTIATE_TEST_CASE_P(AddTests, AddTestF, ::testing::ValuesIn(inputsf));
const std::vector<AddInputs<double>> inputsd = {
{0.00000001, 1024 * 1024, 1234ULL},
{0.00000001, 1024 * 1024 + 2, 1234ULL},
{0.00000001, 1024 * 1024 + 1, 1234ULL},
};
typedef AddTest<double> AddTestD;
TEST_P(AddTestD, Result) { compare(); }
INSTANTIATE_TEST_CASE_P(AddTests, AddTestD, ::testing::ValuesIn(inputsd));
const std::vector<AddInputs<float, double>> inputsfd = {
{0.00000001, 1024 * 1024, 1234ULL},
{0.00000001, 1024 * 1024 + 2, 1234ULL},
{0.00000001, 1024 * 1024 + 1, 1234ULL},
};
typedef AddTest<float, double> AddTestFD;
TEST_P(AddTestFD, Result) { compare(); }
INSTANTIATE_TEST_CASE_P(AddTests, AddTestFD, ::testing::ValuesIn(inputsfd));
} // end namespace LinAlg
} // end namespace MLCommon
| 6cff8b3bf818004ea9c24663edf18c5489e07901.cu | /*
* Copyright (c) 2018-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include <linalg/add.cuh>
#include <random/rng.cuh>
#include "add.cuh"
#include "test_utils.h"
namespace MLCommon {
namespace LinAlg {
template <typename InT, typename OutT = InT>
class AddTest : public ::testing::TestWithParam<AddInputs<InT, OutT>> {
protected:
void SetUp() override {
params = ::testing::TestWithParam<AddInputs<InT, OutT>>::GetParam();
Random::Rng r(params.seed);
int len = params.len;
CUDA_CHECK(cudaStreamCreate(&stream));
allocate(in1, len);
allocate(in2, len);
allocate(out_ref, len);
allocate(out, len);
r.uniform(in1, len, InT(-1.0), InT(1.0), stream);
r.uniform(in2, len, InT(-1.0), InT(1.0), stream);
naiveAddElem<InT, OutT>(out_ref, in1, in2, len);
add<InT, OutT>(out, in1, in2, len, stream);
}
void TearDown() override {
CUDA_CHECK(cudaStreamSynchronize(stream));
CUDA_CHECK(cudaFree(in1));
CUDA_CHECK(cudaFree(in2));
CUDA_CHECK(cudaFree(out_ref));
CUDA_CHECK(cudaFree(out));
CUDA_CHECK(cudaStreamDestroy(stream));
}
void compare() {
ASSERT_TRUE(devArrMatch(out_ref, out, params.len,
CompareApprox<OutT>(params.tolerance)));
}
protected:
AddInputs<InT, OutT> params;
InT *in1, *in2;
OutT *out_ref, *out;
cudaStream_t stream;
};
const std::vector<AddInputs<float>> inputsf = {
{0.000001f, 1024 * 1024, 1234ULL},
{0.000001f, 1024 * 1024 + 2, 1234ULL},
{0.000001f, 1024 * 1024 + 1, 1234ULL},
};
typedef AddTest<float> AddTestF;
TEST_P(AddTestF, Result) { compare(); }
INSTANTIATE_TEST_CASE_P(AddTests, AddTestF, ::testing::ValuesIn(inputsf));
const std::vector<AddInputs<double>> inputsd = {
{0.00000001, 1024 * 1024, 1234ULL},
{0.00000001, 1024 * 1024 + 2, 1234ULL},
{0.00000001, 1024 * 1024 + 1, 1234ULL},
};
typedef AddTest<double> AddTestD;
TEST_P(AddTestD, Result) { compare(); }
INSTANTIATE_TEST_CASE_P(AddTests, AddTestD, ::testing::ValuesIn(inputsd));
const std::vector<AddInputs<float, double>> inputsfd = {
{0.00000001, 1024 * 1024, 1234ULL},
{0.00000001, 1024 * 1024 + 2, 1234ULL},
{0.00000001, 1024 * 1024 + 1, 1234ULL},
};
typedef AddTest<float, double> AddTestFD;
TEST_P(AddTestFD, Result) { compare(); }
INSTANTIATE_TEST_CASE_P(AddTests, AddTestFD, ::testing::ValuesIn(inputsfd));
} // end namespace LinAlg
} // end namespace MLCommon
|
a481bffec31d932d68b7701f0023a99361840822.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <GL/glut.h>
#include <GL/gl.h>
#include <malloc.h>
#include <signal.h>
/******************************************************************************
Displays two grey scale images. On the left is an image that has come from an
image processing pipeline, just after colour thresholding. On the right is
the result of applying an edge detection convolution operator to the left
image. This program performs that convolution.
Things to note:
- A single unsigned char stores a pixel intensity value. 0 is black, 255 is
white.
- The colour mode used is GL_LUMINANCE. This uses a single number to
represent a pixel's intensity. In this case we want 256 shades of grey,
which is best stored in eight bits, so GL_UNSIGNED_BYTE is specified as
the pixel data type.
To compile, adapt the code below to match your filenames:
nvcc -o SantosImage SantosImage.cu -lglut -lGL -lm
Dr Kevan Buckley, University of Wolverhampton, 2018
******************************************************************************/
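/* A minimal sketch, not taken from this program, of how the edge detection
convolution described above could be expressed as a kernel: one thread per
pixel applying a 3x3 Laplacian mask to the 8-bit luminance buffer. The kernel
name, the mask and the suggested launch shape are assumptions made purely for
illustration. It could be launched with, for example,
hipLaunchKernelGGL(edge_detect_sketch, dim3((w + 15) / 16, (h + 15) / 16),
dim3(16, 16), 0, 0, d_in, d_out, w, h);
and the copied-back result drawn with
glDrawPixels(w, h, GL_LUMINANCE, GL_UNSIGNED_BYTE, pixels). */
__global__ void edge_detect_sketch(const unsigned char *in, unsigned char *out,
int w, int h) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= w || y >= h) return;
if (x == 0 || y == 0 || x == w - 1 || y == h - 1) {
out[y * w + x] = 0; // keep a black border instead of reading out of bounds
return;
}
// 3x3 Laplacian mask: centre weight 8, each of the 8 neighbours weighted -1
int sum = 8 * in[y * w + x]
- in[(y - 1) * w + (x - 1)] - in[(y - 1) * w + x] - in[(y - 1) * w + (x + 1)]
- in[y * w + (x - 1)] - in[y * w + (x + 1)]
- in[(y + 1) * w + (x - 1)] - in[(y + 1) * w + x] - in[(y + 1) * w + (x + 1)];
out[y * w + x] = (unsigned char)(sum < 0 ? 0 : (sum > 255 ? 255 : sum));
}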
#define width 100
#define height 72
unsigned char image[] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,255,255,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,
255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,255,255,255,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,
255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,255,0,0,255,255,255,255,255,255,
255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,
255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,255,255,255,255,255,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,
255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,255,255,255,255,255,255,255,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,255,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,
255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,255,255,0,0,0,0,0,0,0,0,
0,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,
255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,255,255,255,255,255,255,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,
255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,255,255,255,255,255,255,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,
255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,
255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,
255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,255,255,255,255,
255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,
255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,
255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,
255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,
255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
};
unsigned char results[width * height];
__global__ void detect_edges(unsigned char *input, unsigned char *output) {
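// One thread per pixel: the kernel is launched with 100 blocks of 72 threads
// (width * height = 100 * 72 = 7200), so blockIdx.x * 72 + threadIdx.x is the
// linear pixel index.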
int i = (blockIdx.x * 72) + threadIdx.x;
int x, y; // the pixel of interest
int b, d, f, h; // the pixels adjacent to x,y used for the calculation
int r; // the result of the calculation
y = i / width;
x = i - (width * y);
if (x == 0 || y == 0 || x == width - 1 || y == height - 1) {
output[i] = 0;
} else {
b = i + width;
d = i - 1;
f = i + 1;
h = i - width;
r = (input[i] * 4) + (input[b] * -1) + (input[d] * -1) + (input[f] * -1)
+ (input[h] * -1);
if (r >= 0) {
output[i] = 255;
} else
{
output[i] = 0;
}
}
}
void tidy_and_exit() {
exit(0);
}
void sigint_callback(int signal_number){
printf("\nInterrupt from keyboard\n");
tidy_and_exit();
}
static void display() {
glClear(GL_COLOR_BUFFER_BIT);
glRasterPos4i(-1, -1, 0, 1);
glDrawPixels(width, height, GL_LUMINANCE, GL_UNSIGNED_BYTE, image);
glRasterPos4i(0, -1, 0, 1);
glDrawPixels(width, height, GL_LUMINANCE, GL_UNSIGNED_BYTE, results);
glFlush();
}
static void key_pressed(unsigned char key, int x, int y) {
switch(key){
case 27: // escape
tidy_and_exit();
break;
default:
printf("\nPress escape to exit\n");
break;
}
}
int time_difference(struct timespec *start, struct timespec *finish,
long long int *difference) {
long long int ds = finish->tv_sec - start->tv_sec;
long long int dn = finish->tv_nsec - start->tv_nsec;
if(dn < 0 ) {
ds--;
dn += 1000000000;
}
*difference = ds * 1000000000 + dn;
return !(*difference > 0);
}
int main(int argc, char **argv) {
unsigned char *d_results;
unsigned char *d_image;
hipMalloc((void**)&d_image, sizeof(unsigned char) * (width * height));
hipMalloc((void**)&d_results, sizeof(unsigned char) * (width * height));
hipMemcpy(d_image, &image, sizeof(unsigned char) * (width * height), hipMemcpyHostToDevice);
signal(SIGINT, sigint_callback);
struct timespec start, finish;
long long int time_elapsed;
clock_gettime(CLOCK_MONOTONIC, &start);
hipLaunchKernelGGL(( detect_edges), dim3(100),dim3(72), 0, 0, d_image, d_results);
hipDeviceSynchronize();
hipMemcpy(&results, d_results, sizeof(unsigned char) * (width * height), hipMemcpyDeviceToHost);
clock_gettime(CLOCK_MONOTONIC, &finish);
time_difference(&start, &finish, &time_elapsed);
printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed,
(time_elapsed/1.0e9));
hipFree(d_image);
hipFree(d_results);
glutInit(&argc, argv);
glutInitWindowSize(width * 2,height);
glutInitDisplayMode(GLUT_SINGLE | GLUT_LUMINANCE);
glutCreateWindow("Cuda Image Processing ");
glutDisplayFunc(display);
glutKeyboardFunc(key_pressed);
glClearColor(0.0, 1.0, 0.0, 1.0);
glutMainLoop();
tidy_and_exit();
return 0;
}
| a481bffec31d932d68b7701f0023a99361840822.cu | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <GL/glut.h>
#include <GL/gl.h>
#include <malloc.h>
#include <signal.h>
/******************************************************************************
Displays two grey scale images. On the left is an image that has come from an
image processing pipeline, just after colour thresholding. On the right is
the result of applying an edge detection convolution operator to the left
image. This program performs that convolution.
Things to note:
- A single unsigned char stores a pixel intensity value. 0 is black, 255 is
white.
- The colour mode used is GL_LUMINANCE. This uses a single number to
represent a pixel's intensity. In this case we want 256 shades of grey,
which is best stored in eight bits, so GL_UNSIGNED_BYTE is specified as
the pixel data type.
To compile, adapt the code below to match your filenames:
nvcc -o SantosImage SantosImage.cu -lglut -lGL -lm
Dr Kevan Buckley, University of Wolverhampton, 2018
******************************************************************************/
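/* The convolution operator applied in detect_edges below is the 4-neighbour
   Laplacian stencil (centre weight 4, edge-adjacent neighbours weighted -1):
        0 -1  0
       -1  4 -1
        0 -1  0
   A pixel is set to 255 when the response is non-negative and 0 otherwise. */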
#define width 100
#define height 72
unsigned char image[] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,255,255,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,
255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,255,255,255,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,
255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,255,0,0,255,255,255,255,255,255,
255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,
255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,255,255,255,255,255,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,
255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,255,255,255,255,255,255,255,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,255,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,
255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,255,255,0,0,0,0,0,0,0,0,
0,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,
255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,255,255,255,255,255,255,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,
255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,255,255,255,255,255,255,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,
255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,
255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,
255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,255,255,255,255,
255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,
255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,
255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,
255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,
255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
};
unsigned char results[width * height];
__global__ void detect_edges(unsigned char *input, unsigned char *output) {
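// One thread per pixel: the kernel is launched with 100 blocks of 72 threads
// (width * height = 100 * 72 = 7200), so blockIdx.x * 72 + threadIdx.x is the
// linear pixel index.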
int i = (blockIdx.x * 72) + threadIdx.x;
int x, y; // the pixel of interest
int b, d, f, h; // the pixels adjacent to x,y used for the calculation
int r; // the result of the calculation
y = i / width;
x = i - (width * y);
if (x == 0 || y == 0 || x == width - 1 || y == height - 1) {
output[i] = 0;
} else {
b = i + width;
d = i - 1;
f = i + 1;
h = i - width;
r = (input[i] * 4) + (input[b] * -1) + (input[d] * -1) + (input[f] * -1)
+ (input[h] * -1);
if (r >= 0) {
output[i] = 255;
} else
{
output[i] = 0;
}
}
}
void tidy_and_exit() {
exit(0);
}
void sigint_callback(int signal_number){
printf("\nInterrupt from keyboard\n");
tidy_and_exit();
}
static void display() {
glClear(GL_COLOR_BUFFER_BIT);
glRasterPos4i(-1, -1, 0, 1);
glDrawPixels(width, height, GL_LUMINANCE, GL_UNSIGNED_BYTE, image);
glRasterPos4i(0, -1, 0, 1);
glDrawPixels(width, height, GL_LUMINANCE, GL_UNSIGNED_BYTE, results);
glFlush();
}
static void key_pressed(unsigned char key, int x, int y) {
switch(key){
case 27: // escape
tidy_and_exit();
break;
default:
printf("\nPress escape to exit\n");
break;
}
}
int time_difference(struct timespec *start, struct timespec *finish,
long long int *difference) {
long long int ds = finish->tv_sec - start->tv_sec;
long long int dn = finish->tv_nsec - start->tv_nsec;
if(dn < 0 ) {
ds--;
dn += 1000000000;
}
*difference = ds * 1000000000 + dn;
return !(*difference > 0);
}
int main(int argc, char **argv) {
unsigned char *d_results;
unsigned char *d_image;
cudaMalloc((void**)&d_image, sizeof(unsigned char) * (width * height));
cudaMalloc((void**)&d_results, sizeof(unsigned char) * (width * height));
cudaMemcpy(d_image, &image, sizeof(unsigned char) * (width * height), cudaMemcpyHostToDevice);
signal(SIGINT, sigint_callback);
struct timespec start, finish;
long long int time_elapsed;
clock_gettime(CLOCK_MONOTONIC, &start);
detect_edges<<<100,72>>>(d_image, d_results);
cudaThreadSynchronize();
cudaMemcpy(&results, d_results, sizeof(unsigned char) * (width * height), cudaMemcpyDeviceToHost);
clock_gettime(CLOCK_MONOTONIC, &finish);
time_difference(&start, &finish, &time_elapsed);
printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed,
(time_elapsed/1.0e9));
cudaFree(d_image);
cudaFree(d_results);
glutInit(&argc, argv);
glutInitWindowSize(width * 2,height);
glutInitDisplayMode(GLUT_SINGLE | GLUT_LUMINANCE);
glutCreateWindow("Cuda Image Processing ");
glutDisplayFunc(display);
glutKeyboardFunc(key_pressed);
glClearColor(0.0, 1.0, 0.0, 1.0);
glutMainLoop();
tidy_and_exit();
return 0;
}
|
909c88e8546d8db8c150728e60beab2eb6213b2f.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <unistd.h>
#include <getopt.h>
#include <stdlib.h>
#include <assert.h>
#include <sys/time.h>
#include <string.h>
#include <chrono>
#include <hip/hip_runtime.h>
#include "common.h"
#define BLOCK_SIZE 16
#include "lud_kernels.cu"
double gettime() {
struct timeval t;
gettimeofday(&t,NULL);
return t.tv_sec+t.tv_usec*1e-6;
}
static int do_verify = 0;
static struct option long_options[] = {
/* name, has_arg, flag, val */
{"input", 1, NULL, 'i'},
{"size", 1, NULL, 's'},
{"verify", 0, NULL, 'v'},
{0,0,0,0}
};
int main ( int argc, char *argv[] )
{
printf("WG size of kernel = %d X %d\n", BLOCK_SIZE, BLOCK_SIZE);
int matrix_dim = 32; /* default matrix_dim */
int opt, option_index=0;
func_ret_t ret;
const char *input_file = NULL;
float *m, *mm;
stopwatch sw;
while ((opt = getopt_long(argc, argv, "::vs:i:",
long_options, &option_index)) != -1 ) {
switch(opt){
case 'i':
input_file = optarg;
break;
case 'v':
do_verify = 1;
break;
case 's':
matrix_dim = atoi(optarg);
printf("Generate input matrix internally, size =%d\n", matrix_dim);
// fprintf(stderr, "Currently not supported, use -i instead\n");
// fprintf(stderr, "Usage: %s [-v] [-s matrix_size|-i input_file]\n", argv[0]);
// exit(EXIT_FAILURE);
break;
case '?':
fprintf(stderr, "invalid option\n");
break;
case ':':
fprintf(stderr, "missing argument\n");
break;
default:
fprintf(stderr, "Usage: %s [-v] [-s matrix_size|-i input_file]\n",
argv[0]);
exit(EXIT_FAILURE);
}
}
if ( (optind < argc) || (optind == 1)) {
fprintf(stderr, "Usage: %s [-v] [-s matrix_size|-i input_file]\n", argv[0]);
exit(EXIT_FAILURE);
}
if (input_file) {
printf("Reading matrix from file %s\n", input_file);
ret = create_matrix_from_file(&m, input_file, &matrix_dim);
if (ret != RET_SUCCESS) {
m = NULL;
fprintf(stderr, "error create matrix from file %s\n", input_file);
exit(EXIT_FAILURE);
}
}
else if (matrix_dim) {
printf("Creating matrix internally size=%d\n", matrix_dim);
ret = create_matrix(&m, matrix_dim);
if (ret != RET_SUCCESS) {
m = NULL;
fprintf(stderr, "error create matrix internally size=%d\n", matrix_dim);
exit(EXIT_FAILURE);
}
}
else {
printf("No input file specified!\n");
exit(EXIT_FAILURE);
}
if (do_verify){
printf("Before LUD\n");
// print_matrix(m, matrix_dim);
matrix_duplicate(m, &mm, matrix_dim);
}
/* beginning of timing point */
stopwatch_start(&sw);
float *d_m;
hipMalloc((void**)&d_m, matrix_dim*matrix_dim*sizeof(float));
hipMemcpy(d_m, m, matrix_dim*matrix_dim*sizeof(float), hipMemcpyHostToDevice);
int offset;
int i=0;
hipDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
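// Blocked LU decomposition: each iteration factors the current BLOCK_SIZE
// diagonal block (lud_diagonal), updates the blocks in that block row and
// column (lud_perimeter), then updates the trailing submatrix (lud_internal).
// The final leftover diagonal block is factored after the loop.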
for (i=0; i < matrix_dim-BLOCK_SIZE; i += BLOCK_SIZE) {
offset = i; // add the offset
hipLaunchKernelGGL(( lud_diagonal), dim3(1), dim3(BLOCK_SIZE), 0, 0, d_m, matrix_dim, offset);
hipLaunchKernelGGL(( lud_perimeter), dim3((matrix_dim-i)/BLOCK_SIZE-1), dim3(2*BLOCK_SIZE), 0, 0, d_m, matrix_dim, offset);
hipLaunchKernelGGL(( lud_internal), dim3(dim3((matrix_dim-i)/BLOCK_SIZE-1, (matrix_dim-i)/BLOCK_SIZE-1)),
dim3( dim3(BLOCK_SIZE, BLOCK_SIZE)), 0, 0, d_m, matrix_dim, offset);
} // for
offset = i; // add the offset
hipLaunchKernelGGL(( lud_diagonal), dim3(1), dim3(BLOCK_SIZE), 0, 0, d_m, matrix_dim, offset);
hipDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
printf("Total kernel execution time : %f (s)\n", time * 1e-9f);
hipMemcpy(m, d_m, matrix_dim*matrix_dim*sizeof(float), hipMemcpyDeviceToHost);
/* end of timing point */
stopwatch_stop(&sw);
printf("Device offloading time (s): %lf\n", get_interval_by_sec(&sw));
if (do_verify){
printf("After LUD\n");
// print_matrix(m, matrix_dim);
printf(">>>Verify<<<<\n");
lud_verify(mm, m, matrix_dim);
free(mm);
}
free(m);
hipFree(d_m);
return 0;
}
| 909c88e8546d8db8c150728e60beab2eb6213b2f.cu | #include <stdio.h>
#include <unistd.h>
#include <getopt.h>
#include <stdlib.h>
#include <assert.h>
#include <sys/time.h>
#include <string.h>
#include <chrono>
#include <cuda.h>
#include "common.h"
#define BLOCK_SIZE 16
#include "lud_kernels.cu"
double gettime() {
struct timeval t;
gettimeofday(&t,NULL);
return t.tv_sec+t.tv_usec*1e-6;
}
static int do_verify = 0;
static struct option long_options[] = {
/* name, has_arg, flag, val */
{"input", 1, NULL, 'i'},
{"size", 1, NULL, 's'},
{"verify", 0, NULL, 'v'},
{0,0,0,0}
};
int main ( int argc, char *argv[] )
{
printf("WG size of kernel = %d X %d\n", BLOCK_SIZE, BLOCK_SIZE);
int matrix_dim = 32; /* default matrix_dim */
int opt, option_index=0;
func_ret_t ret;
const char *input_file = NULL;
float *m, *mm;
stopwatch sw;
while ((opt = getopt_long(argc, argv, "::vs:i:",
long_options, &option_index)) != -1 ) {
switch(opt){
case 'i':
input_file = optarg;
break;
case 'v':
do_verify = 1;
break;
case 's':
matrix_dim = atoi(optarg);
printf("Generate input matrix internally, size =%d\n", matrix_dim);
// fprintf(stderr, "Currently not supported, use -i instead\n");
// fprintf(stderr, "Usage: %s [-v] [-s matrix_size|-i input_file]\n", argv[0]);
// exit(EXIT_FAILURE);
break;
case '?':
fprintf(stderr, "invalid option\n");
break;
case ':':
fprintf(stderr, "missing argument\n");
break;
default:
fprintf(stderr, "Usage: %s [-v] [-s matrix_size|-i input_file]\n",
argv[0]);
exit(EXIT_FAILURE);
}
}
if ( (optind < argc) || (optind == 1)) {
fprintf(stderr, "Usage: %s [-v] [-s matrix_size|-i input_file]\n", argv[0]);
exit(EXIT_FAILURE);
}
if (input_file) {
printf("Reading matrix from file %s\n", input_file);
ret = create_matrix_from_file(&m, input_file, &matrix_dim);
if (ret != RET_SUCCESS) {
m = NULL;
fprintf(stderr, "error create matrix from file %s\n", input_file);
exit(EXIT_FAILURE);
}
}
else if (matrix_dim) {
printf("Creating matrix internally size=%d\n", matrix_dim);
ret = create_matrix(&m, matrix_dim);
if (ret != RET_SUCCESS) {
m = NULL;
fprintf(stderr, "error create matrix internally size=%d\n", matrix_dim);
exit(EXIT_FAILURE);
}
}
else {
printf("No input file specified!\n");
exit(EXIT_FAILURE);
}
if (do_verify){
printf("Before LUD\n");
// print_matrix(m, matrix_dim);
matrix_duplicate(m, &mm, matrix_dim);
}
/* beginning of timing point */
stopwatch_start(&sw);
float *d_m;
cudaMalloc((void**)&d_m, matrix_dim*matrix_dim*sizeof(float));
cudaMemcpy(d_m, m, matrix_dim*matrix_dim*sizeof(float), cudaMemcpyHostToDevice);
int offset;
int i=0;
cudaDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
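// Blocked LU decomposition: each iteration factors the current BLOCK_SIZE
// diagonal block (lud_diagonal), updates the blocks in that block row and
// column (lud_perimeter), then updates the trailing submatrix (lud_internal).
// The final leftover diagonal block is factored after the loop.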
for (i=0; i < matrix_dim-BLOCK_SIZE; i += BLOCK_SIZE) {
offset = i; // add the offset
lud_diagonal<<<1, BLOCK_SIZE>>>(d_m, matrix_dim, offset);
lud_perimeter<<<(matrix_dim-i)/BLOCK_SIZE-1, 2*BLOCK_SIZE>>>(d_m, matrix_dim, offset);
lud_internal<<< dim3((matrix_dim-i)/BLOCK_SIZE-1, (matrix_dim-i)/BLOCK_SIZE-1),
dim3(BLOCK_SIZE, BLOCK_SIZE)>>>(d_m, matrix_dim, offset);
} // for
offset = i; // add the offset
lud_diagonal<<<1, BLOCK_SIZE>>>(d_m, matrix_dim, offset);
cudaDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
printf("Total kernel execution time : %f (s)\n", time * 1e-9f);
cudaMemcpy(m, d_m, matrix_dim*matrix_dim*sizeof(float), cudaMemcpyDeviceToHost);
/* end of timing point */
stopwatch_stop(&sw);
printf("Device offloading time (s): %lf\n", get_interval_by_sec(&sw));
if (do_verify){
printf("After LUD\n");
// print_matrix(m, matrix_dim);
printf(">>>Verify<<<<\n");
lud_verify(mm, m, matrix_dim);
free(mm);
}
free(m);
cudaFree(d_m);
return 0;
}
|
f2d4940bfa7895e1049f7535d2944992a027628d.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/NativeFunctions.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/hip/HIPApplyUtils.cuh>
namespace at { namespace native {
using where_fn = void (*)(TensorIterator &, ScalarType);
DECLARE_DISPATCH(where_fn, where_kernel);
namespace {
void where_kernel_impl(TensorIterator &iter, ScalarType condition_type) {
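// Element-wise select: out[i] = cond[i] ? self[i] : other[i]. The condition
// tensor may be either uint8 (legacy) or bool, so the lambda is instantiated
// with the matching condition element type.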
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(kHalf, kBool, iter.dtype(), "where_cuda", [&] {
if (condition_type == at::ScalarType::Byte) {
gpu_kernel(
iter,
[=] GPU_LAMBDA (uint8_t cond_val, scalar_t self_val, scalar_t other_val) -> scalar_t {
return cond_val ? self_val : other_val;
});
} else {
gpu_kernel(
iter,
[=] GPU_LAMBDA (bool cond_val, scalar_t self_val, scalar_t other_val) -> scalar_t {
return cond_val ? self_val : other_val;
});
}
});
}
} // anonymous namespace
REGISTER_DISPATCH(where_kernel, &where_kernel_impl);
}} // namespace at::native
| f2d4940bfa7895e1049f7535d2944992a027628d.cu | #include <ATen/NativeFunctions.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/cuda/CUDAApplyUtils.cuh>
namespace at { namespace native {
using where_fn = void (*)(TensorIterator &, ScalarType);
DECLARE_DISPATCH(where_fn, where_kernel);
namespace {
void where_kernel_impl(TensorIterator &iter, ScalarType condition_type) {
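// Element-wise select: out[i] = cond[i] ? self[i] : other[i]. The condition
// tensor may be either uint8 (legacy) or bool, so the lambda is instantiated
// with the matching condition element type.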
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(kHalf, kBool, iter.dtype(), "where_cuda", [&] {
if (condition_type == at::ScalarType::Byte) {
gpu_kernel(
iter,
[=] GPU_LAMBDA (uint8_t cond_val, scalar_t self_val, scalar_t other_val) -> scalar_t {
return cond_val ? self_val : other_val;
});
} else {
gpu_kernel(
iter,
[=] GPU_LAMBDA (bool cond_val, scalar_t self_val, scalar_t other_val) -> scalar_t {
return cond_val ? self_val : other_val;
});
}
});
}
} // anonymous namespace
REGISTER_DISPATCH(where_kernel, &where_kernel_impl);
}} // namespace at::native
|
e09f6986217396c7b5e8795cfe0246ab54d80a45.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "orttraining/training_ops/cuda/activation/bias_gelu_grad_impl.h"
#include "core/providers/cuda/cu_inc/common.cuh"
#include "core/providers/cuda/shared_inc/fast_divmod.h"
#include "orttraining/training_ops/cpu/activation/gelu_computation_mode.h"
#include "orttraining/training_ops/cuda/activation/gelu_grad_impl_common.cuh"
namespace onnxruntime {
namespace cuda {
template <typename T, typename GeluComputationMode, int num_elements_per_thread>
__global__ void BiasGeluGradDxKernel(int64_t bias_size, const T* dY, const T* X, const T* B, T* dX) {
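// Each thread handles num_elements_per_thread elements spaced blockDim.x apart:
// dY, X and B are first staged into registers, then
// dX = ComputeGeluGradScalar(dY, X + B) is written back. B is broadcast across
// grid rows, one grid row per bias_size-sized slice of the input.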
const auto num_elements_per_block = num_elements_per_thread * blockDim.x;
const auto input_base_idx = bias_size * blockIdx.y + num_elements_per_block * blockIdx.x + threadIdx.x;
const auto bias_base_idx = num_elements_per_block * blockIdx.x + threadIdx.x;
const auto element_stride = blockDim.x;
T reg_dY[num_elements_per_thread];
T reg_X[num_elements_per_thread];
T reg_B[num_elements_per_thread];
{
auto input_idx = input_base_idx;
auto bias_idx = bias_base_idx;
#pragma unroll
for (int element_idx = 0; element_idx < num_elements_per_thread; ++element_idx) {
if (bias_idx < bias_size) {
reg_dY[element_idx] = dY[input_idx];
reg_X[element_idx] = X[input_idx];
reg_B[element_idx] = B[bias_idx];
input_idx += element_stride;
bias_idx += element_stride;
}
}
}
{
auto input_idx = input_base_idx;
auto bias_idx = bias_base_idx;
#pragma unroll
for (int element_idx = 0; element_idx < num_elements_per_thread; ++element_idx) {
if (bias_idx < bias_size) {
dX[input_idx] = ComputeGeluGradScalar(
reg_dY[element_idx], reg_X[element_idx] + reg_B[element_idx], GeluComputationMode{});
input_idx += element_stride;
bias_idx += element_stride;
}
}
}
}
template <typename T, typename GeluComputationMode>
void LaunchBiasGeluGradDxKernel(
hipStream_t stream,
int64_t input_size, int64_t bias_size,
const T* dY, const T* X, const T* B, T* dX) {
// given a 2D grid of blocks:
// each grid row handles bias_size elements
// there are input_size / bias_size rows
constexpr int num_elements_per_thread = GridDim::maxElementsPerThread;
const int num_threads_per_block =
std::min<int>(static_cast<int>(CeilDiv(bias_size, num_elements_per_thread)), static_cast<int>(GridDim::maxThreadsPerBlock));
const auto grid_width = CeilDiv(bias_size, num_elements_per_thread * num_threads_per_block);
const auto grid_height = input_size / bias_size;
const dim3 grid_dim{static_cast<uint32_t>(grid_width), static_cast<uint32_t>(grid_height)};
hipLaunchKernelGGL(( BiasGeluGradDxKernel<T, GeluComputationMode, num_elements_per_thread>)
, dim3(grid_dim), dim3(num_threads_per_block), 0, stream, bias_size, dY, X, B, dX);
}
// explicit instantiations
#define SPECIALIZED_BIAS_GELU_GRAD_IMPL(T, GeluComputationMode) \
template void LaunchBiasGeluGradDxKernel<T, GeluComputationMode>( \
hipStream_t stream, int64_t input_size, int64_t bias_size, \
const T* dY, const T* X, const T* B, T* dX)
SPECIALIZED_BIAS_GELU_GRAD_IMPL(half, gelu_computation_mode::Default);
SPECIALIZED_BIAS_GELU_GRAD_IMPL(float, gelu_computation_mode::Default);
SPECIALIZED_BIAS_GELU_GRAD_IMPL(double, gelu_computation_mode::Default);
SPECIALIZED_BIAS_GELU_GRAD_IMPL(half, gelu_computation_mode::Approximation);
SPECIALIZED_BIAS_GELU_GRAD_IMPL(float, gelu_computation_mode::Approximation);
SPECIALIZED_BIAS_GELU_GRAD_IMPL(double, gelu_computation_mode::Approximation);
SPECIALIZED_BIAS_GELU_GRAD_IMPL(BFloat16, gelu_computation_mode::Default);
SPECIALIZED_BIAS_GELU_GRAD_IMPL(BFloat16, gelu_computation_mode::Approximation);
#undef SPECIALIZED_BIAS_GELU_GRAD_IMPL
} // namespace cuda
} // namespace onnxruntime
| e09f6986217396c7b5e8795cfe0246ab54d80a45.cu | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "orttraining/training_ops/cuda/activation/bias_gelu_grad_impl.h"
#include "core/providers/cuda/cu_inc/common.cuh"
#include "core/providers/cuda/shared_inc/fast_divmod.h"
#include "orttraining/training_ops/cpu/activation/gelu_computation_mode.h"
#include "orttraining/training_ops/cuda/activation/gelu_grad_impl_common.cuh"
namespace onnxruntime {
namespace cuda {
template <typename T, typename GeluComputationMode, int num_elements_per_thread>
__global__ void BiasGeluGradDxKernel(int64_t bias_size, const T* dY, const T* X, const T* B, T* dX) {
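// Each thread handles num_elements_per_thread elements spaced blockDim.x apart:
// dY, X and B are first staged into registers, then
// dX = ComputeGeluGradScalar(dY, X + B) is written back. B is broadcast across
// grid rows, one grid row per bias_size-sized slice of the input.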
const auto num_elements_per_block = num_elements_per_thread * blockDim.x;
const auto input_base_idx = bias_size * blockIdx.y + num_elements_per_block * blockIdx.x + threadIdx.x;
const auto bias_base_idx = num_elements_per_block * blockIdx.x + threadIdx.x;
const auto element_stride = blockDim.x;
T reg_dY[num_elements_per_thread];
T reg_X[num_elements_per_thread];
T reg_B[num_elements_per_thread];
{
auto input_idx = input_base_idx;
auto bias_idx = bias_base_idx;
#pragma unroll
for (int element_idx = 0; element_idx < num_elements_per_thread; ++element_idx) {
if (bias_idx < bias_size) {
reg_dY[element_idx] = dY[input_idx];
reg_X[element_idx] = X[input_idx];
reg_B[element_idx] = B[bias_idx];
input_idx += element_stride;
bias_idx += element_stride;
}
}
}
{
auto input_idx = input_base_idx;
auto bias_idx = bias_base_idx;
#pragma unroll
for (int element_idx = 0; element_idx < num_elements_per_thread; ++element_idx) {
if (bias_idx < bias_size) {
dX[input_idx] = ComputeGeluGradScalar(
reg_dY[element_idx], reg_X[element_idx] + reg_B[element_idx], GeluComputationMode{});
input_idx += element_stride;
bias_idx += element_stride;
}
}
}
}
template <typename T, typename GeluComputationMode>
void LaunchBiasGeluGradDxKernel(
cudaStream_t stream,
int64_t input_size, int64_t bias_size,
const T* dY, const T* X, const T* B, T* dX) {
// given a 2D grid of blocks:
// each grid row handles bias_size elements
// there are input_size / bias_size rows
constexpr int num_elements_per_thread = GridDim::maxElementsPerThread;
const int num_threads_per_block =
std::min<int>(static_cast<int>(CeilDiv(bias_size, num_elements_per_thread)), static_cast<int>(GridDim::maxThreadsPerBlock));
const auto grid_width = CeilDiv(bias_size, num_elements_per_thread * num_threads_per_block);
const auto grid_height = input_size / bias_size;
const dim3 grid_dim{static_cast<uint32_t>(grid_width), static_cast<uint32_t>(grid_height)};
BiasGeluGradDxKernel<T, GeluComputationMode, num_elements_per_thread>
<<<grid_dim, num_threads_per_block, 0, stream>>>(bias_size, dY, X, B, dX);
}
// explicit instantiations
#define SPECIALIZED_BIAS_GELU_GRAD_IMPL(T, GeluComputationMode) \
template void LaunchBiasGeluGradDxKernel<T, GeluComputationMode>( \
cudaStream_t stream, int64_t input_size, int64_t bias_size, \
const T* dY, const T* X, const T* B, T* dX)
SPECIALIZED_BIAS_GELU_GRAD_IMPL(half, gelu_computation_mode::Default);
SPECIALIZED_BIAS_GELU_GRAD_IMPL(float, gelu_computation_mode::Default);
SPECIALIZED_BIAS_GELU_GRAD_IMPL(double, gelu_computation_mode::Default);
SPECIALIZED_BIAS_GELU_GRAD_IMPL(half, gelu_computation_mode::Approximation);
SPECIALIZED_BIAS_GELU_GRAD_IMPL(float, gelu_computation_mode::Approximation);
SPECIALIZED_BIAS_GELU_GRAD_IMPL(double, gelu_computation_mode::Approximation);
SPECIALIZED_BIAS_GELU_GRAD_IMPL(BFloat16, gelu_computation_mode::Default);
SPECIALIZED_BIAS_GELU_GRAD_IMPL(BFloat16, gelu_computation_mode::Approximation);
#undef SPECIALIZED_BIAS_GELU_GRAD_IMPL
} // namespace cuda
} // namespace onnxruntime
|
dd89bfc65e99e36b31572e287e469592d45c5559.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
hipError_t addWithCuda(int *c, const int *a, const int *b, size_t size);
__global__ void addKernel(int *c, const int *a, const int *b)
{
int i = blockIdx.x;
c[i] = a[i] + b[i];
}
int main()
{
const int arraySize = 5;
const int a[arraySize] = { 1, 2, 3, 4, 5 };
const int b[arraySize] = { 10, 20, 30, 40, 50 };
int c[arraySize] = { 0 };
// Add vectors in parallel.
hipError_t cudaStatus;
int num = 0;
hipDeviceProp_t prop;
cudaStatus = hipGetDeviceCount(&num);
for(int i = 0;i<num;i++)
{
hipGetDeviceProperties(&prop,i);
}
cudaStatus = addWithCuda(c, a, b, arraySize);
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "addWithCuda failed!");
return 1;
}
printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",c[0],c[1],c[2],c[3],c[4]);
// hipDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipDeviceReset failed!");
return 1;
}
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
hipError_t addWithCuda(int *c, const int *a, const int *b, size_t size)
{
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = hipMalloc((void**)&dev_c, size * sizeof(int));
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(int));
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(int));
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
hipStream_t stream[5];
for(int i = 0;i<5;i++)
{
hipStreamCreate(&stream[i]); // create the stream
}
// Launch a kernel on the GPU with one thread for each element.
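// Each element is computed by its own single-thread kernel launch placed in a
// separate stream, so the five launches are issued asynchronously and may
// overlap on the device.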
for(int i = 0;i<5;i++)
{
// <<< block num, thread num, block's shared memory size, stream target >>>
hipLaunchKernelGGL(( addKernel), dim3(1),dim3(1),0,stream[i], dev_c+i, dev_a+i, dev_b+i); // launch in the stream
}
hipDeviceSynchronize();
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error:
for(int i = 0;i<5;i++)
{
hipStreamDestroy(stream[i]); // destroy the stream
}
hipFree(dev_c);
hipFree(dev_a);
hipFree(dev_b);
return cudaStatus;
}
| dd89bfc65e99e36b31572e287e469592d45c5559.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
cudaError_t addWithCuda(int *c, const int *a, const int *b, size_t size);
__global__ void addKernel(int *c, const int *a, const int *b)
{
int i = blockIdx.x;
c[i] = a[i] + b[i];
}
int main()
{
const int arraySize = 5;
const int a[arraySize] = { 1, 2, 3, 4, 5 };
const int b[arraySize] = { 10, 20, 30, 40, 50 };
int c[arraySize] = { 0 };
// Add vectors in parallel.
cudaError_t cudaStatus;
int num = 0;
cudaDeviceProp prop;
cudaStatus = cudaGetDeviceCount(&num);
for(int i = 0;i<num;i++)
{
cudaGetDeviceProperties(&prop,i);
}
cudaStatus = addWithCuda(c, a, b, arraySize);
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "addWithCuda failed!");
return 1;
}
printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",c[0],c[1],c[2],c[3],c[4]);
// cudaThreadExit must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaThreadExit();
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaThreadExit failed!");
return 1;
}
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
cudaError_t addWithCuda(int *c, const int *a, const int *b, size_t size)
{
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int));
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStream_t stream[5];
for(int i = 0;i<5;i++)
{
cudaStreamCreate(&stream[i]); // create the stream
}
// Launch a kernel on the GPU with one thread for each element.
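// Each element is computed by its own single-thread kernel launch placed in a
// separate stream, so the five launches are issued asynchronously and may
// overlap on the device.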
for(int i = 0;i<5;i++)
{
// <<< block num, thread num, block's shared memory size, stream target >>>
addKernel<<<1,1,0,stream[i]>>>(dev_c+i, dev_a+i, dev_b+i); // launch in the stream
}
cudaDeviceSynchronize();
// cudaThreadSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaThreadSynchronize();
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaThreadSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
for(int i = 0;i<5;i++)
{
cudaStreamDestroy(stream[i]); //销毁流
}
cudaFree(dev_c);
cudaFree(dev_a);
cudaFree(dev_b);
return cudaStatus;
}
|
67a69da219c76b58beda0539f455652ea3135284.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "config.h"
#include "vcode.h"
#include <cvl.h>
#include "y.tab.h"
#include <cutil_inline.h>
#include "defins.cuh"
MAXALIGN *ComputeMemory = NULL;
extern "C" void init (MAXALIGN *mem) {
ComputeMemory = mem;
}
__global__ void fused0Kernel(MAXALIGN *data, int dst, int s0, int len, int scratch) {
int address = blockDim.y * blockIdx.y + blockDim.x * blockIdx.x + threadIdx.x;
if (address < len) {
int *pDst = (int*)(&data[dst]);
int *pSrc0 = (int*)(&data[s0]);
pDst[address] = (100);
}
}
__global__ void fused1Kernel(MAXALIGN *data, int dst, int s0, int s1, int len, int scratch) {
int address = blockDim.y * blockIdx.y + blockDim.x * blockIdx.x + threadIdx.x;
if (address < len) {
float *pDst = (float*)(&data[dst]);
float *pSrc0 = (float*)(&data[s0]);
int *pSrc1 = (int*)(&data[s1]);
pDst[address] = (divide(pSrc0[address], (z_to_d(pSrc1[address]))));
}
}
__global__ void fused2Kernel(MAXALIGN *data, int dst, int s0, int s1, int s2, int len, int scratch) {
int address = blockDim.y * blockIdx.y + blockDim.x * blockIdx.x + threadIdx.x;
if (address < len) {
float *pDst = (float*)(&data[dst]);
float *pSrc0 = (float*)(&data[s0]);
int *pSrc1 = (int*)(&data[s1]);
float *pSrc2 = (float*)(&data[s2]);
pDst[address] = (times((divide(pSrc0[address], (z_to_d(pSrc1[address])))), pSrc2[address]));
}
}
void fused0(vec_p d, vec_p s0, int len, vec_p scratch) {
if (len==0) {return;}
SYNC();
DEF_BLOCKS_PER_GRID(len);
hipLaunchKernelGGL(( fused0Kernel), dim3(blocksPerGrid), dim3(THREADS_PER_BLOCK), 0, 0, ComputeMemory, d, s0, len, scratch);
cutilCheckMsg("fused0 execution failed\n");
}
void fused1(vec_p d, vec_p s0, vec_p s1, int len, vec_p scratch) {
if (len==0) {return;}
SYNC();
DEF_BLOCKS_PER_GRID(len);
hipLaunchKernelGGL(( fused1Kernel), dim3(blocksPerGrid), dim3(THREADS_PER_BLOCK), 0, 0, ComputeMemory, d, s0, s1, len, scratch);
cutilCheckMsg("fused1 execution failed\n");
}
void fused2(vec_p d, vec_p s0, vec_p s1, vec_p s2, int len, vec_p scratch) {
if (len==0) {return;}
SYNC();
DEF_BLOCKS_PER_GRID(len);
hipLaunchKernelGGL(( fused2Kernel), dim3(blocksPerGrid), dim3(THREADS_PER_BLOCK), 0, 0, ComputeMemory, d, s0, s1, s2, len, scratch);
cutilCheckMsg("fused2 execution failed\n");
}
make_no_scratch(fused0)
make_no_scratch(fused1)
make_no_scratch(fused2)
make_inplace(fused0, INPLACE_NONE)
make_inplace(fused1, INPLACE_1)
make_inplace(fused2, INPLACE_1)
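/* Descriptor table consumed by the surrounding VCODE/CVL runtime: one entry per fused op,
   giving its name, arities, argument/result types and its element-wise class (Elwise1-3). */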
vopdes_t vops[] = {
{FUSED, "fused0", 1, 1,
{Segdes,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal},
{NONE,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal},
{Int,},
{COMPAT1,},
{1,},
Elwise1},
{FUSED, "fused1", 2, 1,
{Float,Int,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal},
{NONE,NONE,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal},
{Float,},
{AGREE1,},
{1,},
Elwise2},
{FUSED, "fused2", 3, 1,
{Float,Int,Float,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal},
{NONE,NONE,NONE,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal},
{Float,},
{AGREE1,},
{1,},
Elwise3},
};
cvl_triple_t cvl_funs[] = {
{ { (void (*)())fused0, (int (*)())fused0_scratch, (unsigned (*)())fused0_inplace },},
{ { (void (*)())fused1, (int (*)())fused1_scratch, (unsigned (*)())fused1_inplace },},
{ { (void (*)())fused2, (int (*)())fused2_scratch, (unsigned (*)())fused2_inplace },},
};
/*
fused OP0#2 ($0 : SEGDES) = (DIST INT @ (CONST INT 100) $0)
fused OP1#1 ($0 : FLOAT, $1 : INT) = (/ FLOAT @ $0 (I_TO_F @ $1))
fused OP2#1 ($0 : FLOAT, $1 : INT, $2 : FLOAT) = (* FLOAT @ (/ FLOAT @ $0 (I_TO_F @ $1)) $2)
*/
| 67a69da219c76b58beda0539f455652ea3135284.cu | #include "config.h"
#include "vcode.h"
#include <cvl.h>
#include "y.tab.h"
#include <cutil_inline.h>
#include "defins.cuh"
MAXALIGN *ComputeMemory = NULL;
extern "C" void init (MAXALIGN *mem) {
ComputeMemory = mem;
}
__global__ void fused0Kernel(MAXALIGN *data, int dst, int s0, int len, int scratch) {
int address = blockDim.y * blockIdx.y + blockDim.x * blockIdx.x + threadIdx.x;
if (address < len) {
int *pDst = (int*)(&data[dst]);
int *pSrc0 = (int*)(&data[s0]);
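    // pSrc0 appears to be kept only for uniformity with the other fused kernels and is unused
    // here: OP0 simply distributes the constant 100 (see the VCODE summary at the end of this file).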
pDst[address] = (100);
}
}
__global__ void fused1Kernel(MAXALIGN *data, int dst, int s0, int s1, int len, int scratch) {
int address = blockDim.y * blockIdx.y + blockDim.x * blockIdx.x + threadIdx.x;
if (address < len) {
float *pDst = (float*)(&data[dst]);
float *pSrc0 = (float*)(&data[s0]);
int *pSrc1 = (int*)(&data[s1]);
pDst[address] = (divide(pSrc0[address], (z_to_d(pSrc1[address]))));
}
}
__global__ void fused2Kernel(MAXALIGN *data, int dst, int s0, int s1, int s2, int len, int scratch) {
int address = blockDim.y * blockIdx.y + blockDim.x * blockIdx.x + threadIdx.x;
if (address < len) {
float *pDst = (float*)(&data[dst]);
float *pSrc0 = (float*)(&data[s0]);
int *pSrc1 = (int*)(&data[s1]);
float *pSrc2 = (float*)(&data[s2]);
pDst[address] = (times((divide(pSrc0[address], (z_to_d(pSrc1[address])))), pSrc2[address]));
}
}
void fused0(vec_p d, vec_p s0, int len, vec_p scratch) {
if (len==0) {return;}
SYNC();
DEF_BLOCKS_PER_GRID(len);
fused0Kernel<<<blocksPerGrid, THREADS_PER_BLOCK>>>(ComputeMemory, d, s0, len, scratch);
cutilCheckMsg("fused0 execution failed\n");
}
void fused1(vec_p d, vec_p s0, vec_p s1, int len, vec_p scratch) {
if (len==0) {return;}
SYNC();
DEF_BLOCKS_PER_GRID(len);
fused1Kernel<<<blocksPerGrid, THREADS_PER_BLOCK>>>(ComputeMemory, d, s0, s1, len, scratch);
cutilCheckMsg("fused1 execution failed\n");
}
void fused2(vec_p d, vec_p s0, vec_p s1, vec_p s2, int len, vec_p scratch) {
if (len==0) {return;}
SYNC();
DEF_BLOCKS_PER_GRID(len);
fused2Kernel<<<blocksPerGrid, THREADS_PER_BLOCK>>>(ComputeMemory, d, s0, s1, s2, len, scratch);
cutilCheckMsg("fused2 execution failed\n");
}
make_no_scratch(fused0)
make_no_scratch(fused1)
make_no_scratch(fused2)
make_inplace(fused0, INPLACE_NONE)
make_inplace(fused1, INPLACE_1)
make_inplace(fused2, INPLACE_1)
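/* Descriptor table consumed by the surrounding VCODE/CVL runtime: one entry per fused op,
   giving its name, arities, argument/result types and its element-wise class (Elwise1-3). */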
vopdes_t vops[] = {
{FUSED, "fused0", 1, 1,
{Segdes,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal},
{NONE,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal},
{Int,},
{COMPAT1,},
{1,},
Elwise1},
{FUSED, "fused1", 2, 1,
{Float,Int,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal},
{NONE,NONE,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal},
{Float,},
{AGREE1,},
{1,},
Elwise2},
{FUSED, "fused2", 3, 1,
{Float,Int,Float,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal},
{NONE,NONE,NONE,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal,Illegal},
{Float,},
{AGREE1,},
{1,},
Elwise3},
};
cvl_triple_t cvl_funs[] = {
{ { (void (*)())fused0, (int (*)())fused0_scratch, (unsigned (*)())fused0_inplace },},
{ { (void (*)())fused1, (int (*)())fused1_scratch, (unsigned (*)())fused1_inplace },},
{ { (void (*)())fused2, (int (*)())fused2_scratch, (unsigned (*)())fused2_inplace },},
};
/*
fused OP0#2 ($0 : SEGDES) = (DIST INT @ (CONST INT 100) $0)
fused OP1#1 ($0 : FLOAT, $1 : INT) = (/ FLOAT @ $0 (I_TO_F @ $1))
fused OP2#1 ($0 : FLOAT, $1 : INT, $2 : FLOAT) = (* FLOAT @ (/ FLOAT @ $0 (I_TO_F @ $1)) $2)
*/
|
7a0fc1e537b4ce098aec926499b9841bd1bb44f4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdint.h>
#include "hiprand/hiprand_kernel.h"
#include "stars_config.h"
#include "stars_helpers.h"
#include "stars_helpers_c.h"
int
main()
{
output_t *outs ;
output_t *couts1;
output_t *couts2;
output_t *cstate_matrix;
output_t *cstate_matrix_shadow;
float *cpchange;
hiprandStateXORWOW_t *crgens;
unsigned int seed = generate_seed();
const int output_size = STARS * sizeof( output_t );
const int rgen_size = NEIGHBORHOODS * sizeof( hiprandStateXORWOW_t );
const int state_matrix_size = 2 * NUM_STATES * sizeof( output_t );
const int pchange_size = NUM_STATES * sizeof( float );
hipMalloc( (void**)&couts1, output_size );
hipMalloc( (void**)&couts2, output_size );
hipMalloc( (void**)&cstate_matrix, state_matrix_size );
hipMalloc( (void**)&cpchange, pchange_size );
hipMalloc( (void**)&crgens, rgen_size );
  //TODO: are these being copied to the device correctly? (cstate_matrix is read back below for inspection)
hipMemcpy( cstate_matrix, STATE_CHANGES, state_matrix_size, hipMemcpyHostToDevice );
hipMemcpy( cpchange, P_CHANGE, pchange_size, hipMemcpyHostToDevice );
hipLaunchKernelGGL(( init_buf), dim3(BLOCKS), dim3(THREADS_PER_BLOCK), 0, 0, couts1, PROTOSTAR, NEIGHBORHOOD_STARS );
hipLaunchKernelGGL(( init_buf), dim3(BLOCKS), dim3(THREADS_PER_BLOCK), 0, 0, couts2, PROTOSTAR, NEIGHBORHOOD_STARS );
hipLaunchKernelGGL(( init_rands), dim3(BLOCKS), dim3(THREADS_PER_BLOCK), 0, 0, seed, crgens );
  hipHostMalloc(&cstate_matrix_shadow, state_matrix_size, hipHostMallocDefault); // third argument is a flags field, not an element size
hipMemcpy( cstate_matrix_shadow, cstate_matrix, state_matrix_size, hipMemcpyDeviceToHost );
inspect( 'S', STATE_CHANGES, 2, state_matrix_size);
inspect( 's', cstate_matrix_shadow, 2, state_matrix_size);
  hipHostMalloc(&outs, output_size, hipHostMallocDefault); // need STARS * sizeof(output_t) bytes so the memcpy below fits
hipMemcpy( outs, couts1, output_size, hipMemcpyDeviceToHost );
inspect( 'x', outs, NEIGHBORHOOD_STARS, STARS );
hipMemcpy( outs, couts2, output_size, hipMemcpyDeviceToHost );
inspect( 'X', outs, NEIGHBORHOOD_STARS, STARS );
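  // Ping-pong between the two state buffers: each launch reads one buffer and writes the
  // other, so one loop pass (two launches) advances the simulation by two iterations.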
for( int i=0; i< ITERATIONS; i += 2 ){
hipLaunchKernelGGL(( iterate_states), dim3(BLOCKS), dim3(THREADS_PER_BLOCK), 0, 0, crgens, couts1, couts2, NEIGHBORHOOD_STARS, cstate_matrix, cpchange );
hipLaunchKernelGGL(( iterate_states), dim3(BLOCKS), dim3(THREADS_PER_BLOCK), 0, 0, crgens, couts2, couts1, NEIGHBORHOOD_STARS, cstate_matrix, cpchange );
//debug
hipMemcpy( outs, couts1, output_size, hipMemcpyDeviceToHost );
inspect( 'y', outs, NEIGHBORHOOD_STARS, STARS );
}
hipFree( couts1 );
hipFree( couts2 );
hipFree( crgens );
hipFree( cstate_matrix );
hipFree( cpchange );
return EXIT_SUCCESS;
}
| 7a0fc1e537b4ce098aec926499b9841bd1bb44f4.cu | #include <stdio.h>
#include <stdint.h>
#include "curand_kernel.h"
#include "stars_config.h"
#include "stars_helpers.h"
#include "stars_helpers_c.h"
int
main()
{
output_t *outs ;
output_t *couts1;
output_t *couts2;
output_t *cstate_matrix;
output_t *cstate_matrix_shadow;
float *cpchange;
curandStateXORWOW_t *crgens;
unsigned int seed = generate_seed();
const int output_size = STARS * sizeof( output_t );
const int rgen_size = NEIGHBORHOODS * sizeof( curandStateXORWOW_t );
const int state_matrix_size = 2 * NUM_STATES * sizeof( output_t );
const int pchange_size = NUM_STATES * sizeof( float );
cudaMalloc( (void**)&couts1, output_size );
cudaMalloc( (void**)&couts2, output_size );
cudaMalloc( (void**)&cstate_matrix, state_matrix_size );
cudaMalloc( (void**)&cpchange, pchange_size );
cudaMalloc( (void**)&crgens, rgen_size );
  //TODO: are these being copied to the device correctly? (cstate_matrix is read back below for inspection)
cudaMemcpy( cstate_matrix, STATE_CHANGES, state_matrix_size, cudaMemcpyHostToDevice );
cudaMemcpy( cpchange, P_CHANGE, pchange_size, cudaMemcpyHostToDevice );
init_buf<<<BLOCKS, THREADS_PER_BLOCK>>>( couts1, PROTOSTAR, NEIGHBORHOOD_STARS );
init_buf<<<BLOCKS, THREADS_PER_BLOCK>>>( couts2, PROTOSTAR, NEIGHBORHOOD_STARS );
init_rands<<<BLOCKS, THREADS_PER_BLOCK>>>( seed, crgens );
  cudaHostAlloc(&cstate_matrix_shadow, state_matrix_size, cudaHostAllocDefault); // third argument is a flags field, not an element size
cudaMemcpy( cstate_matrix_shadow, cstate_matrix, state_matrix_size, cudaMemcpyDeviceToHost );
inspect( 'S', STATE_CHANGES, 2, state_matrix_size);
inspect( 's', cstate_matrix_shadow, 2, state_matrix_size);
  cudaHostAlloc(&outs, output_size, cudaHostAllocDefault); // need STARS * sizeof(output_t) bytes so the memcpy below fits
cudaMemcpy( outs, couts1, output_size, cudaMemcpyDeviceToHost );
inspect( 'x', outs, NEIGHBORHOOD_STARS, STARS );
cudaMemcpy( outs, couts2, output_size, cudaMemcpyDeviceToHost );
inspect( 'X', outs, NEIGHBORHOOD_STARS, STARS );
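  // Ping-pong between the two state buffers: each launch reads one buffer and writes the
  // other, so one loop pass (two launches) advances the simulation by two iterations.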
for( int i=0; i< ITERATIONS; i += 2 ){
iterate_states<<<BLOCKS, THREADS_PER_BLOCK>>>( crgens, couts1, couts2, NEIGHBORHOOD_STARS, cstate_matrix, cpchange );
iterate_states<<<BLOCKS, THREADS_PER_BLOCK>>>( crgens, couts2, couts1, NEIGHBORHOOD_STARS, cstate_matrix, cpchange );
//debug
cudaMemcpy( outs, couts1, output_size, cudaMemcpyDeviceToHost );
inspect( 'y', outs, NEIGHBORHOOD_STARS, STARS );
}
cudaFree( couts1 );
cudaFree( couts2 );
cudaFree( crgens );
cudaFree( cstate_matrix );
cudaFree( cpchange );
return EXIT_SUCCESS;
}
|
3b2d53f760c6cff99ccbf5a858fe76cc987a36f4.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <helper_cuda.h> // helper functions for CUDA timing and initialization
#include <helper_functions.h> // helper functions for timing, string parsing
#include "../include/muonDeviceWrapperGPU.cuh"
#include "../include/HoughKernels.cuh"
#include <sys/time.h>
#include <stdio.h>
#include <stdlib.h>
#include <string>
#include <iomanip>
namespace TrigMuonModuleKernels{
DeviceManager::DeviceManager() : nDevices(0)
{
checkCudaErrors(hipGetDeviceCount(&nDevices));
}
APE::HoughTransformDeviceContext*
DeviceManager::createHTContext(const int& devId){
checkCudaErrors(hipSetDevice(devId));
APE::HoughTransformDeviceContext* p = new APE::HoughTransformDeviceContext;
p->m_deviceId = devId;
checkCudaErrors(hipMalloc((void **)&p->d_HTConfig, sizeof(HT_ALGO_CONFIGURATION)));
checkCudaErrors(hipMalloc((void **)&p->d_HTData, sizeof(INPUT_HT_DATA)));
checkCudaErrors(hipHostMalloc((void **)&p->h_HTConfig, sizeof(HT_ALGO_CONFIGURATION)));
checkCudaErrors(hipHostMalloc((void **)&p->h_HTData, sizeof(INPUT_HT_DATA)));
return p;
}
void DeviceManager::deleteHTContext(APE::HoughTransformDeviceContext* p){
checkCudaErrors(hipSetDevice(p->m_deviceId));
checkCudaErrors(hipFree(p->d_HTConfig));
checkCudaErrors(hipFree(p->d_HTData));
checkCudaErrors(hipHostFree(p->h_HTConfig));
checkCudaErrors(hipHostFree(p->h_HTData));
delete p;
}
float wrpHoughCtx(const APE::HoughTransformDeviceContext& devC){
checkCudaErrors(hipSetDevice(devC.m_deviceId));
//checkCudaErrors(hipMemcpy(devC.d_HTConfig, devC.h_HTConfig, sizeof(HT_ALGO_CONFIGURATION), hipMemcpyHostToDevice));
double * curvGM;
checkCudaErrors(hipMalloc((void**)&curvGM, sizeof(double) * curvBins));
hipLaunchKernelGGL(( copyCurvVals), dim3(1), dim3(curvBins*0.5), 0, 0, curvGM);
checkCudaErrors(hipStreamSynchronize(0));
copyCfgData((HT_ALGO_CONFIGURATION*) devC.h_HTConfig, curvGM);
checkCudaErrors(hipStreamSynchronize(0));
return 0.;
};
std::vector<float> wrpHoughAlgo(const APE::HoughTransformDeviceContext& devC, MUON_HOUGH_RED_PATTERN *pOutput){
std::vector<float> timeVec;
struct timeval tStart, tMid1, tMid2, tEnd;
float totalCUDATime = 0.;
gettimeofday (&tStart, NULL);
HT_ALGO_CONFIGURATION * hConf = reinterpret_cast<HT_ALGO_CONFIGURATION*>(devC.h_HTConfig);
INPUT_HT_DATA* hData = reinterpret_cast<INPUT_HT_DATA*>(devC.h_HTData);
int Nsec[2] = {hConf->steps.sectors.xyz, hConf->steps.sectors.rz};
int NA[2] = {(int)(2*hConf->steps.ip.xy/hConf->steps.stepsize.xy), (int)hConf->steps.nbins_curved};//+2
//S:12,16 A:16,160 B:1440,720
int voteXY = hData->m_nVoteXY;
int pattXY = hData->m_nPattXY;
int voteCC = hData->m_nVoteRZ;
int pattCC = hData->m_nPattRZ;
gettimeofday (&tMid1, NULL);
std::cout << "TOTAL Host Preliminary Stuff " << (((tMid1.tv_sec - tStart.tv_sec)*1000000L +tMid1.tv_usec) - tStart.tv_usec) * 0.001 << " ms" << std::endl;
// stomp a foot on the device
checkCudaErrors(hipSetDevice(devC.m_deviceId));
hipDeviceProp_t prop;
checkCudaErrors(hipGetDeviceProperties(&prop, devC.m_deviceId));
int gpuProps[2] = {prop.warpSize, prop.maxThreadsPerBlock};
gettimeofday (&tMid2, NULL);
std::cout << "TOTAL DEVICE Preliminary Stuff " << (((tMid2.tv_sec - tMid1.tv_sec)*1000000L +tMid2.tv_usec) - tMid1.tv_usec) * 0.001 << " ms" << std::endl;
// create and start CUDA timer
StopWatchInterface *timerCUDA = 0;
sdkCreateTimer(&timerCUDA);
sdkResetTimer(&timerCUDA);
sdkStartTimer(&timerCUDA);
// Copy from Host to Device
checkCudaErrors(hipMemcpyAsync(devC.d_HTData, devC.h_HTData, sizeof(INPUT_HT_DATA), hipMemcpyHostToDevice, 0));
// allocate output mem area
MUON_HOUGH_RED_PATTERN * pOut;
checkCudaErrors(hipMalloc((void **)&pOut, sizeof(MUON_HOUGH_RED_PATTERN)));
//checkCudaErrors(hipMemcpyAsync(pOut, pOutput, sizeof(MUON_HOUGH_RED_PATTERN), hipMemcpyHostToDevice, 0));
int * votXYHit, * assXYHit, * votCCHit, * assCCHit;
checkCudaErrors(hipMalloc((void **)&votXYHit, sizeof(int) * voteXY));
checkCudaErrors(hipMalloc((void **)&assXYHit, sizeof(int) * pattXY));
checkCudaErrors(hipMalloc((void **)&votCCHit, sizeof(int) * voteCC));
checkCudaErrors(hipMalloc((void **)&assCCHit, sizeof(int) * pattCC));
/*
int * b_xy_maxes, * v_xy_maxes;
checkCudaErrors(hipMalloc(&b_xy_maxes, sizeof(int) * Nsec[0] * NA[0]));
checkCudaErrors(hipMalloc(&v_xy_maxes, sizeof(int) * Nsec[0] * NA[0]));*/
int * b_cc_maxes, * v_cc_maxes;
checkCudaErrors(hipMalloc(&b_cc_maxes, sizeof(int) * Nsec[1] * NA[1]));
checkCudaErrors(hipMalloc(&v_cc_maxes, sizeof(int) * Nsec[1] * NA[1]));
int * devProps;
checkCudaErrors(hipMalloc((void**)&devProps, 2*sizeof(int)));
checkCudaErrors(hipMemcpyAsync(devProps, gpuProps, 2*sizeof(int), hipMemcpyHostToDevice, 0));
/*
int * s_xy_max, * b_xy_max, * v_xy_max;
checkCudaErrors(hipMalloc(&s_xy_max, sizeof(int) * Nsec[0]));
checkCudaErrors(hipMalloc(&b_xy_max, sizeof(int) * Nsec[0]));
checkCudaErrors(hipMalloc(&v_xy_max, sizeof(int) * Nsec[0]));*/
int * s_cc_max, * b_cc_max, * v_cc_max;
checkCudaErrors(hipMalloc(&s_cc_max, sizeof(int) * Nsec[1]));
checkCudaErrors(hipMalloc(&b_cc_max, sizeof(int) * Nsec[1]));
checkCudaErrors(hipMalloc(&v_cc_max, sizeof(int) * Nsec[1]));
int * controls;
checkCudaErrors(hipMalloc((void**)&controls, sizeof(int) * 4));
sdkStopTimer(&timerCUDA);
float TimerCUDASpan = sdkGetAverageTimerValue(&timerCUDA);
timeVec.push_back(TimerCUDASpan);
totalCUDATime += TimerCUDASpan;
std::cout << "Input allocation time: " << TimerCUDASpan << " ms" << std::endl;
sdkResetTimer(&timerCUDA);
sdkStartTimer(&timerCUDA);
int * monitor;
checkCudaErrors(hipMalloc(&monitor, sizeof(int)*NA[1]));
//houghAlgo<<< 1, 1 >>>(devProps, controls, votXYHit, assXYHit, votCCHit, assCCHit, (INPUT_HT_DATA*) devC.d_HTData, b_xy_maxes, v_xy_maxes, b_cc_maxes, v_cc_maxes, s_xy_max, b_xy_max, v_xy_max, s_cc_max, b_cc_max, v_cc_max, pOut);
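  // Note: houghAlgo is launched with a single block of a single thread, so the transform runs
  // as one device-side control thread unless the kernel spawns further work internally
  // (not visible from this file).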
hipLaunchKernelGGL(( houghAlgo), dim3(1), dim3(1) , 0, 0, devProps, controls, votXYHit, assXYHit, votCCHit, assCCHit, (INPUT_HT_DATA*) devC.d_HTData, b_cc_maxes, v_cc_maxes, s_cc_max, b_cc_max, v_cc_max, pOut, monitor);
checkCudaErrors(hipStreamSynchronize(0));
getLastCudaError("Wrapper Kernel execution failed");
//int * h_xy=(int*)malloc(sizeof(int)*Nsec[0]*NA[0]);
int * h_cc=(int*)malloc(sizeof(int)*Nsec[1]*NA[1]);
//checkCudaErrors(hipMemcpyAsync((void*)h_xy, v_xy_maxes, sizeof(int)*Nsec[0]*NA[0], hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy((void*)h_cc, v_cc_maxes, sizeof(int)*Nsec[1]*NA[1], hipMemcpyDeviceToHost));
/*
std::cout << "XY\n";
for(int i=0;i<Nsec[0]*NA[0];i++)
std::cout << i << ":" << h_xy[i] << " ";*/
std::cout << "\nCYLINDER\n";
for(int i=0;i<Nsec[1]*NA[1];i++)
std::cout << (int)((i+0.)/NA[1]) << ":" << h_cc[i] << " ";
sdkStopTimer(&timerCUDA);
TimerCUDASpan = sdkGetAverageTimerValue(&timerCUDA);
int * h_mon=(int*)malloc(sizeof(int)*NA[1]);
checkCudaErrors(hipMemcpy((void*)h_mon, monitor, sizeof(int)*NA[1], hipMemcpyDeviceToHost));
std::cout << "\n";
for(int y=0;y<NA[1];y++)
std::cout << y << ":" << h_mon[y] << " ";//+0.)/100000. << " ";
std::cout << "\n";
timeVec.push_back(TimerCUDASpan);
totalCUDATime += TimerCUDASpan;
std::cout <<"Wrapper kernel execution " << TimerCUDASpan << " ms";// << std::endl;
sdkResetTimer(&timerCUDA);
sdkStartTimer(&timerCUDA);
checkCudaErrors(hipMemcpyAsync((void*)pOutput, pOut, sizeof(MUON_HOUGH_RED_PATTERN), hipMemcpyDeviceToHost));
sdkStopTimer(&timerCUDA);
TimerCUDASpan = sdkGetAverageTimerValue(&timerCUDA);
timeVec.push_back(TimerCUDASpan);
totalCUDATime += TimerCUDASpan;
std::cout <<"Output copy to host " << TimerCUDASpan << " ms" << std::endl;
checkCudaErrors(hipFree(devProps));
checkCudaErrors(hipFree(controls));/*
checkCudaErrors(hipFree(b_xy_maxes));
checkCudaErrors(hipFree(v_xy_maxes));*/
checkCudaErrors(hipFree(b_cc_maxes));
checkCudaErrors(hipFree(v_cc_maxes));/*
checkCudaErrors(hipFree(s_xy_max));
checkCudaErrors(hipFree(b_xy_max));
checkCudaErrors(hipFree(v_xy_max));*/
checkCudaErrors(hipFree(s_cc_max));
checkCudaErrors(hipFree(b_cc_max));
checkCudaErrors(hipFree(v_cc_max));
checkCudaErrors(hipFree(votXYHit));
checkCudaErrors(hipFree(assXYHit));
checkCudaErrors(hipFree(votCCHit));
checkCudaErrors(hipFree(assCCHit));
checkCudaErrors(hipFree(pOut));
checkCudaErrors(hipStreamSynchronize(0));
gettimeofday (&tEnd, NULL);
float totalRUNTime = (((tEnd.tv_sec - tStart.tv_sec)*1000000L +tEnd.tv_usec) - tStart.tv_usec) * 0.001;
std::cout << "TOTAL RUNNING TIME " << totalRUNTime << " ms\nTOTAL CUDA TIME " << totalCUDATime << " ms" << std::endl;
return timeVec;
};
} | 3b2d53f760c6cff99ccbf5a858fe76cc987a36f4.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include <helper_cuda.h> // helper functions for CUDA timing and initialization
#include <helper_functions.h> // helper functions for timing, string parsing
#include "../include/muonDeviceWrapperGPU.cuh"
#include "../include/HoughKernels.cuh"
#include <sys/time.h>
#include <stdio.h>
#include <stdlib.h>
#include <string>
#include <iomanip>
namespace TrigMuonModuleKernels{
DeviceManager::DeviceManager() : nDevices(0)
{
checkCudaErrors(cudaGetDeviceCount(&nDevices));
}
APE::HoughTransformDeviceContext*
DeviceManager::createHTContext(const int& devId){
checkCudaErrors(cudaSetDevice(devId));
APE::HoughTransformDeviceContext* p = new APE::HoughTransformDeviceContext;
p->m_deviceId = devId;
checkCudaErrors(cudaMalloc((void **)&p->d_HTConfig, sizeof(HT_ALGO_CONFIGURATION)));
checkCudaErrors(cudaMalloc((void **)&p->d_HTData, sizeof(INPUT_HT_DATA)));
checkCudaErrors(cudaMallocHost((void **)&p->h_HTConfig, sizeof(HT_ALGO_CONFIGURATION)));
checkCudaErrors(cudaMallocHost((void **)&p->h_HTData, sizeof(INPUT_HT_DATA)));
return p;
}
void DeviceManager::deleteHTContext(APE::HoughTransformDeviceContext* p){
checkCudaErrors(cudaSetDevice(p->m_deviceId));
checkCudaErrors(cudaFree(p->d_HTConfig));
checkCudaErrors(cudaFree(p->d_HTData));
checkCudaErrors(cudaFreeHost(p->h_HTConfig));
checkCudaErrors(cudaFreeHost(p->h_HTData));
delete p;
}
float wrpHoughCtx(const APE::HoughTransformDeviceContext& devC){
checkCudaErrors(cudaSetDevice(devC.m_deviceId));
//checkCudaErrors(cudaMemcpy(devC.d_HTConfig, devC.h_HTConfig, sizeof(HT_ALGO_CONFIGURATION), cudaMemcpyHostToDevice));
double * curvGM;
checkCudaErrors(cudaMalloc((void**)&curvGM, sizeof(double) * curvBins));
copyCurvVals<<<1, curvBins*0.5>>>(curvGM);
checkCudaErrors(cudaStreamSynchronize(0));
copyCfgData((HT_ALGO_CONFIGURATION*) devC.h_HTConfig, curvGM);
checkCudaErrors(cudaStreamSynchronize(0));
return 0.;
};
std::vector<float> wrpHoughAlgo(const APE::HoughTransformDeviceContext& devC, MUON_HOUGH_RED_PATTERN *pOutput){
std::vector<float> timeVec;
struct timeval tStart, tMid1, tMid2, tEnd;
float totalCUDATime = 0.;
gettimeofday (&tStart, NULL);
HT_ALGO_CONFIGURATION * hConf = reinterpret_cast<HT_ALGO_CONFIGURATION*>(devC.h_HTConfig);
INPUT_HT_DATA* hData = reinterpret_cast<INPUT_HT_DATA*>(devC.h_HTData);
int Nsec[2] = {hConf->steps.sectors.xyz, hConf->steps.sectors.rz};
int NA[2] = {(int)(2*hConf->steps.ip.xy/hConf->steps.stepsize.xy), (int)hConf->steps.nbins_curved};//+2
//S:12,16 A:16,160 B:1440,720
int voteXY = hData->m_nVoteXY;
int pattXY = hData->m_nPattXY;
int voteCC = hData->m_nVoteRZ;
int pattCC = hData->m_nPattRZ;
gettimeofday (&tMid1, NULL);
std::cout << "TOTAL Host Preliminary Stuff " << (((tMid1.tv_sec - tStart.tv_sec)*1000000L +tMid1.tv_usec) - tStart.tv_usec) * 0.001 << " ms" << std::endl;
// stomp a foot on the device
checkCudaErrors(cudaSetDevice(devC.m_deviceId));
cudaDeviceProp prop;
checkCudaErrors(cudaGetDeviceProperties(&prop, devC.m_deviceId));
int gpuProps[2] = {prop.warpSize, prop.maxThreadsPerBlock};
gettimeofday (&tMid2, NULL);
std::cout << "TOTAL DEVICE Preliminary Stuff " << (((tMid2.tv_sec - tMid1.tv_sec)*1000000L +tMid2.tv_usec) - tMid1.tv_usec) * 0.001 << " ms" << std::endl;
// create and start CUDA timer
StopWatchInterface *timerCUDA = 0;
sdkCreateTimer(&timerCUDA);
sdkResetTimer(&timerCUDA);
sdkStartTimer(&timerCUDA);
// Copy from Host to Device
checkCudaErrors(cudaMemcpyAsync(devC.d_HTData, devC.h_HTData, sizeof(INPUT_HT_DATA), cudaMemcpyHostToDevice, 0));
// allocate output mem area
MUON_HOUGH_RED_PATTERN * pOut;
checkCudaErrors(cudaMalloc((void **)&pOut, sizeof(MUON_HOUGH_RED_PATTERN)));
//checkCudaErrors(cudaMemcpyAsync(pOut, pOutput, sizeof(MUON_HOUGH_RED_PATTERN), cudaMemcpyHostToDevice, 0));
int * votXYHit, * assXYHit, * votCCHit, * assCCHit;
checkCudaErrors(cudaMalloc((void **)&votXYHit, sizeof(int) * voteXY));
checkCudaErrors(cudaMalloc((void **)&assXYHit, sizeof(int) * pattXY));
checkCudaErrors(cudaMalloc((void **)&votCCHit, sizeof(int) * voteCC));
checkCudaErrors(cudaMalloc((void **)&assCCHit, sizeof(int) * pattCC));
/*
int * b_xy_maxes, * v_xy_maxes;
checkCudaErrors(cudaMalloc(&b_xy_maxes, sizeof(int) * Nsec[0] * NA[0]));
checkCudaErrors(cudaMalloc(&v_xy_maxes, sizeof(int) * Nsec[0] * NA[0]));*/
int * b_cc_maxes, * v_cc_maxes;
checkCudaErrors(cudaMalloc(&b_cc_maxes, sizeof(int) * Nsec[1] * NA[1]));
checkCudaErrors(cudaMalloc(&v_cc_maxes, sizeof(int) * Nsec[1] * NA[1]));
int * devProps;
checkCudaErrors(cudaMalloc((void**)&devProps, 2*sizeof(int)));
checkCudaErrors(cudaMemcpyAsync(devProps, gpuProps, 2*sizeof(int), cudaMemcpyHostToDevice, 0));
/*
int * s_xy_max, * b_xy_max, * v_xy_max;
checkCudaErrors(cudaMalloc(&s_xy_max, sizeof(int) * Nsec[0]));
checkCudaErrors(cudaMalloc(&b_xy_max, sizeof(int) * Nsec[0]));
checkCudaErrors(cudaMalloc(&v_xy_max, sizeof(int) * Nsec[0]));*/
int * s_cc_max, * b_cc_max, * v_cc_max;
checkCudaErrors(cudaMalloc(&s_cc_max, sizeof(int) * Nsec[1]));
checkCudaErrors(cudaMalloc(&b_cc_max, sizeof(int) * Nsec[1]));
checkCudaErrors(cudaMalloc(&v_cc_max, sizeof(int) * Nsec[1]));
int * controls;
checkCudaErrors(cudaMalloc((void**)&controls, sizeof(int) * 4));
sdkStopTimer(&timerCUDA);
float TimerCUDASpan = sdkGetAverageTimerValue(&timerCUDA);
timeVec.push_back(TimerCUDASpan);
totalCUDATime += TimerCUDASpan;
std::cout << "Input allocation time: " << TimerCUDASpan << " ms" << std::endl;
sdkResetTimer(&timerCUDA);
sdkStartTimer(&timerCUDA);
int * monitor;
checkCudaErrors(cudaMalloc(&monitor, sizeof(int)*NA[1]));
//houghAlgo<<< 1, 1 >>>(devProps, controls, votXYHit, assXYHit, votCCHit, assCCHit, (INPUT_HT_DATA*) devC.d_HTData, b_xy_maxes, v_xy_maxes, b_cc_maxes, v_cc_maxes, s_xy_max, b_xy_max, v_xy_max, s_cc_max, b_cc_max, v_cc_max, pOut);
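  // Note: houghAlgo is launched with a single block of a single thread, so the transform runs
  // as one device-side control thread unless the kernel spawns further work internally
  // (not visible from this file).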
houghAlgo<<< 1, 1 >>>(devProps, controls, votXYHit, assXYHit, votCCHit, assCCHit, (INPUT_HT_DATA*) devC.d_HTData, b_cc_maxes, v_cc_maxes, s_cc_max, b_cc_max, v_cc_max, pOut, monitor);
checkCudaErrors(cudaStreamSynchronize(0));
getLastCudaError("Wrapper Kernel execution failed");
//int * h_xy=(int*)malloc(sizeof(int)*Nsec[0]*NA[0]);
int * h_cc=(int*)malloc(sizeof(int)*Nsec[1]*NA[1]);
//checkCudaErrors(cudaMemcpyAsync((void*)h_xy, v_xy_maxes, sizeof(int)*Nsec[0]*NA[0], cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy((void*)h_cc, v_cc_maxes, sizeof(int)*Nsec[1]*NA[1], cudaMemcpyDeviceToHost));
/*
std::cout << "XY\n";
for(int i=0;i<Nsec[0]*NA[0];i++)
std::cout << i << ":" << h_xy[i] << " ";*/
std::cout << "\nCYLINDER\n";
for(int i=0;i<Nsec[1]*NA[1];i++)
std::cout << (int)((i+0.)/NA[1]) << ":" << h_cc[i] << " ";
sdkStopTimer(&timerCUDA);
TimerCUDASpan = sdkGetAverageTimerValue(&timerCUDA);
int * h_mon=(int*)malloc(sizeof(int)*NA[1]);
checkCudaErrors(cudaMemcpy((void*)h_mon, monitor, sizeof(int)*NA[1], cudaMemcpyDeviceToHost));
std::cout << "\n";
for(int y=0;y<NA[1];y++)
std::cout << y << ":" << h_mon[y] << " ";//+0.)/100000. << " ";
std::cout << "\n";
timeVec.push_back(TimerCUDASpan);
totalCUDATime += TimerCUDASpan;
std::cout <<"Wrapper kernel execution " << TimerCUDASpan << " ms";// << std::endl;
sdkResetTimer(&timerCUDA);
sdkStartTimer(&timerCUDA);
checkCudaErrors(cudaMemcpyAsync((void*)pOutput, pOut, sizeof(MUON_HOUGH_RED_PATTERN), cudaMemcpyDeviceToHost));
sdkStopTimer(&timerCUDA);
TimerCUDASpan = sdkGetAverageTimerValue(&timerCUDA);
timeVec.push_back(TimerCUDASpan);
totalCUDATime += TimerCUDASpan;
std::cout <<"Output copy to host " << TimerCUDASpan << " ms" << std::endl;
checkCudaErrors(cudaFree(devProps));
checkCudaErrors(cudaFree(controls));/*
checkCudaErrors(cudaFree(b_xy_maxes));
checkCudaErrors(cudaFree(v_xy_maxes));*/
checkCudaErrors(cudaFree(b_cc_maxes));
checkCudaErrors(cudaFree(v_cc_maxes));/*
checkCudaErrors(cudaFree(s_xy_max));
checkCudaErrors(cudaFree(b_xy_max));
checkCudaErrors(cudaFree(v_xy_max));*/
checkCudaErrors(cudaFree(s_cc_max));
checkCudaErrors(cudaFree(b_cc_max));
checkCudaErrors(cudaFree(v_cc_max));
checkCudaErrors(cudaFree(votXYHit));
checkCudaErrors(cudaFree(assXYHit));
checkCudaErrors(cudaFree(votCCHit));
checkCudaErrors(cudaFree(assCCHit));
checkCudaErrors(cudaFree(pOut));
checkCudaErrors(cudaStreamSynchronize(0));
gettimeofday (&tEnd, NULL);
float totalRUNTime = (((tEnd.tv_sec - tStart.tv_sec)*1000000L +tEnd.tv_usec) - tStart.tv_usec) * 0.001;
std::cout << "TOTAL RUNNING TIME " << totalRUNTime << " ms\nTOTAL CUDA TIME " << totalCUDATime << " ms" << std::endl;
return timeVec;
};
} |
7ef6858585b47279a5aab2873b3845df03d98663.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "shared.h"
#include <hiprand/hiprand_kernel.h>
using namespace std;
typedef struct Node
{
int id;
Node *prev;
Node *next;
__device__ Node(int id) : id(id){};
} Node_t;
#define CUDA_CHECK(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
int maxIndex(int * array, int size) {
int max = 0;
for (int i = 0; i < size; i++) {
if (array[i] > array[max])
max = i;
}
return max;
}
__global__ void init_rng(int nthreads, hiprandState_t *states, unsigned long long seed, unsigned long long offset)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= nthreads)
return;
/* Each thread gets same seed, a different sequence number, no offset */
hiprand_init(seed + id, 0, offset, &states[id]);
}
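// Each thread builds one reverse-reachable (RR) set: starting from a uniformly random node it
// walks the CSR graph with a stack, keeping each edge with probability data[i], marks every
// reached node in `out`, and atomically bumps that node's count in nodeHistogram.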
__global__ void generate_rr_sets(float *data, int *rows, int *cols, bool *out, int *nodeHistogram, int numNodes, int numSets, hiprandState_t *states)
{
const unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < numSets)
{
hiprandState_t *state = &states[tid];
/* Because C does not give us the luxury of dynamic arrays, to imitate the
behavior of a stack, I am using a linked list*/
int randomNodeId = ceil(numNodes * hiprand_uniform(state)) - 1;
Node *stack = new Node(randomNodeId);
Node *auxiliary = new Node(AUXILIARY_NODE_ID);
auxiliary->next = stack;
stack->prev = auxiliary;
// Returns false when stack is NULL
while (stack != NULL && stack->id != AUXILIARY_NODE_ID)
{
// pop from stack
int currentNodeId = stack->id;
Node *temp = stack;
stack = stack->prev;
delete temp;
// If current is not in visited
if (!out[tid * numNodes + currentNodeId])
{
out[tid * numNodes + currentNodeId] = true;
atomicAdd(&nodeHistogram[currentNodeId], 1);
int dataStart = rows[currentNodeId];
int dataEnd = rows[currentNodeId + 1];
for (unsigned int i = dataStart; i < dataEnd; i++)
{
if (hiprand_uniform(state) < data[i])
{
// append to stack
stack->next = new Node(cols[i]);
stack->next->prev = stack;
stack = stack->next;
}
}
}
}
delete auxiliary;
}
}
__global__ void update_counts(bool *data, int *rows, int *cols, int *histogram, int numRows, int numNodes, int nodeToDelete)
{
int row = blockIdx.x * blockDim.x + threadIdx.x;
if (row >= numRows)
return;
int dataStart = rows[row];
int dataEnd = rows[row + 1];
bool exists = false;
    // Let's first figure out whether this set contains the node being deleted and is still live
for (int i = dataStart; i < dataEnd; i++)
{
if (data[i] && cols[i] == nodeToDelete)
{
exists = true;
break;
}
}
if (exists)
{
        // Now let's set all of them to false and update the histogram atomically
for (int i = dataStart; i < dataEnd; i++)
{
data[i] = false;
atomicSub(&histogram[cols[i]], 1);
}
}
}
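// Greedy seed selection over sampled RR sets: the sets are generated in batches on the GPU,
// nodeHistogram counts how many sets each node appears in, and the k seeds are chosen by
// repeatedly taking the most frequent node and removing (via update_counts) the sets it covers.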
unordered_set<int> nodeSelection(CSR<float> *graph, int k, double theta)
{
unordered_set<int>::iterator it;
unordered_set<int> seeds;
map<int, unordered_set<int>> R;
float *deviceDataFloat;
bool *deviceDataBool;
int *deviceRows;
int *deviceCols;
int *deviceNodeHistogram;
int *hostNodeHistogram;
bool *deviceProcessedRows;
bool *hostProcessedRows;
hiprandState_t *deviceStates;
// Initialize data, rows, and cols
int sizeOfData = graph->data.size() * sizeof(float);
int sizeOfRows = graph->rows.size() * sizeof(int);
int sizeOfCols = graph->cols.size() * sizeof(int);
CUDA_CHECK(hipMalloc((void **)&deviceDataFloat, sizeOfData));
CUDA_CHECK(hipMalloc((void **)&deviceRows, sizeOfRows));
CUDA_CHECK(hipMalloc((void **)&deviceCols, sizeOfCols));
CUDA_CHECK(hipMemcpy(deviceDataFloat, &(graph->data[0]), sizeOfData, hipMemcpyHostToDevice));
CUDA_CHECK(hipMemcpy(deviceRows, &(graph->rows[0]), sizeOfRows, hipMemcpyHostToDevice));
CUDA_CHECK(hipMemcpy(deviceCols, &(graph->cols[0]), sizeOfCols, hipMemcpyHostToDevice));
// Initialize output of kernel
int numNodes = (int)graph->rows.size() - 1;
int sizeOfNodeHistogram = numNodes * sizeof(int);
CUDA_CHECK(hipMalloc((void **)&deviceNodeHistogram, sizeOfNodeHistogram));
CUDA_CHECK(hipMemset(deviceNodeHistogram, 0, sizeOfNodeHistogram));
hostNodeHistogram = (int*) malloc(sizeOfNodeHistogram);
// Calculate number of batches
int numBatches = ceil(theta / NUM_ROWS_PER_BATCH);
// Initialize processed rows output
long long int sizeOfProcessedRows = sizeof(bool) * NUM_ROWS_PER_BATCH * numNodes;
CUDA_CHECK(hipMalloc((void **)&deviceProcessedRows, sizeOfProcessedRows));
hostProcessedRows = (bool *)malloc(sizeOfProcessedRows);
// Initialize RNG States
CUDA_CHECK(hipMalloc((void **)&deviceStates, NUM_ROWS_PER_BATCH * sizeof(hiprandState_t)));
dim3 dimGrid((NUM_ROWS_PER_BATCH / BLOCK_SIZE) + 1, 1, 1);
dim3 dimBlock(BLOCK_SIZE, 1, 1);
hipLaunchKernelGGL(( init_rng), dim3(dimGrid), dim3(dimBlock), 0, 0, NUM_ROWS_PER_BATCH, deviceStates, 1, 0);
CUDA_CHECK(hipPeekAtLastError());
CUDA_CHECK(hipDeviceSynchronize());
// Process batches
int numRowsProcessed = 0;
// Can't use bool: https://stackoverflow.com/questions/8399417/why-vectorboolreference-doesnt-return-reference-to-bool
CSR<char> *processedRows = new CSR<char>();
for (int i = 0; i < numBatches; i++)
{
// Reset processed rows for new kernel
CUDA_CHECK(hipMemset(deviceProcessedRows, false, sizeOfProcessedRows));
// Process the minimum number of rows
int numRowsToProcess = min(NUM_ROWS_PER_BATCH, (int)ceil(theta) - numRowsProcessed);
// Launch RR generation kernel
dimGrid = dim3(ceil(float(numRowsToProcess) / BLOCK_SIZE), 1, 1);
dimBlock = dim3(BLOCK_SIZE, 1, 1);
hipLaunchKernelGGL(( generate_rr_sets), dim3(dimGrid), dim3(dimBlock), 0, 0, deviceDataFloat, deviceRows, deviceCols, deviceProcessedRows, deviceNodeHistogram, numNodes, numRowsToProcess, deviceStates);
CUDA_CHECK(hipPeekAtLastError());
CUDA_CHECK(hipDeviceSynchronize());
// Add to our running processedRows CSR using the rows and cols members
CUDA_CHECK(hipMemcpy(hostProcessedRows, deviceProcessedRows, sizeOfProcessedRows, hipMemcpyDeviceToHost));
for (int j = 0; j < numRowsToProcess; j++)
{
processedRows->rows.push_back(processedRows->data.size());
for (int k = 0; k < numNodes; k++)
{
if (hostProcessedRows[(unsigned long long int)j * numNodes + k])
{
processedRows->data.push_back(true);
processedRows->cols.push_back(k);
}
}
}
processedRows->rows.push_back(processedRows->data.size());
numRowsProcessed += numRowsToProcess;
}
// Copy our processedRows CSR to device
CUDA_CHECK(hipFree(deviceRows));
CUDA_CHECK(hipFree(deviceCols));
CUDA_CHECK(hipFree(deviceDataFloat));
sizeOfData = processedRows->data.size() * sizeof(char);
sizeOfRows = processedRows->rows.size() * sizeof(int);
sizeOfCols = processedRows->cols.size() * sizeof(int);
CUDA_CHECK(hipMalloc((void **)&deviceDataBool, sizeOfData));
CUDA_CHECK(hipMalloc((void **)&deviceRows, sizeOfRows));
CUDA_CHECK(hipMalloc((void **)&deviceCols, sizeOfCols));
CUDA_CHECK(hipMemcpy(deviceDataBool, &(processedRows->data[0]), sizeOfData, hipMemcpyHostToDevice));
CUDA_CHECK(hipMemcpy(deviceRows, &(processedRows->rows[0]), sizeOfRows, hipMemcpyHostToDevice));
CUDA_CHECK(hipMemcpy(deviceCols, &(processedRows->cols[0]), sizeOfCols, hipMemcpyHostToDevice));
// Initialize dimensions for count updating
unsigned int mostCommonNode;
dimGrid = dim3(ceil(float(numRowsProcessed) / BLOCK_SIZE), 1, 1);
dimBlock = dim3(BLOCK_SIZE, 1, 1);
// Copy out node histogram to host
CUDA_CHECK(hipMemcpy(hostNodeHistogram, deviceNodeHistogram, sizeOfNodeHistogram, hipMemcpyDeviceToHost));
// Select nodes using histogram and processedRows CSR
for (int j = 0; j < k - 1; j++)
{
mostCommonNode = maxIndex(hostNodeHistogram, numNodes);
seeds.insert(mostCommonNode);
hipLaunchKernelGGL(( update_counts), dim3(dimGrid), dim3(dimBlock), 0, 0, deviceDataBool, deviceRows, deviceCols, deviceNodeHistogram, numRowsProcessed, numNodes, mostCommonNode);
CUDA_CHECK(hipPeekAtLastError());
CUDA_CHECK(hipDeviceSynchronize());
CUDA_CHECK(hipMemcpy(hostNodeHistogram, deviceNodeHistogram, sizeOfNodeHistogram, hipMemcpyDeviceToHost));
}
mostCommonNode = maxIndex(hostNodeHistogram, numNodes);
seeds.insert(mostCommonNode);
CUDA_CHECK(hipFree(deviceDataBool));
CUDA_CHECK(hipFree(deviceRows));
CUDA_CHECK(hipFree(deviceCols));
CUDA_CHECK(hipFree(deviceProcessedRows));
CUDA_CHECK(hipFree(deviceStates));
free(hostProcessedRows);
delete processedRows;
return seeds;
}
int main(int argc, char **argv)
{
Benchmark b;
b.setNodeSelectionFunction(nodeSelection);
b.run();
return 0;
}
| 7ef6858585b47279a5aab2873b3845df03d98663.cu | #include "shared.h"
#include <curand_kernel.h>
using namespace std;
typedef struct Node
{
int id;
Node *prev;
Node *next;
__device__ Node(int id) : id(id){};
} Node_t;
#define CUDA_CHECK(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
int maxIndex(int * array, int size) {
int max = 0;
for (int i = 0; i < size; i++) {
if (array[i] > array[max])
max = i;
}
return max;
}
__global__ void init_rng(int nthreads, curandState *states, unsigned long long seed, unsigned long long offset)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= nthreads)
return;
/* Each thread gets same seed, a different sequence number, no offset */
curand_init(seed + id, 0, offset, &states[id]);
}
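// Each thread builds one reverse-reachable (RR) set: starting from a uniformly random node it
// walks the CSR graph with a stack, keeping each edge with probability data[i], marks every
// reached node in `out`, and atomically bumps that node's count in nodeHistogram.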
__global__ void generate_rr_sets(float *data, int *rows, int *cols, bool *out, int *nodeHistogram, int numNodes, int numSets, curandState *states)
{
const unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < numSets)
{
curandState *state = &states[tid];
/* Because C does not give us the luxury of dynamic arrays, to imitate the
behavior of a stack, I am using a linked list*/
int randomNodeId = ceil(numNodes * curand_uniform(state)) - 1;
Node *stack = new Node(randomNodeId);
Node *auxiliary = new Node(AUXILIARY_NODE_ID);
auxiliary->next = stack;
stack->prev = auxiliary;
// Returns false when stack is NULL
while (stack != NULL && stack->id != AUXILIARY_NODE_ID)
{
// pop from stack
int currentNodeId = stack->id;
Node *temp = stack;
stack = stack->prev;
delete temp;
// If current is not in visited
if (!out[tid * numNodes + currentNodeId])
{
out[tid * numNodes + currentNodeId] = true;
atomicAdd(&nodeHistogram[currentNodeId], 1);
int dataStart = rows[currentNodeId];
int dataEnd = rows[currentNodeId + 1];
for (unsigned int i = dataStart; i < dataEnd; i++)
{
if (curand_uniform(state) < data[i])
{
// append to stack
stack->next = new Node(cols[i]);
stack->next->prev = stack;
stack = stack->next;
}
}
}
}
delete auxiliary;
}
}
__global__ void update_counts(bool *data, int *rows, int *cols, int *histogram, int numRows, int numNodes, int nodeToDelete)
{
int row = blockIdx.x * blockDim.x + threadIdx.x;
if (row >= numRows)
return;
int dataStart = rows[row];
int dataEnd = rows[row + 1];
bool exists = false;
    // Let's first figure out whether this set contains the node being deleted and is still live
for (int i = dataStart; i < dataEnd; i++)
{
if (data[i] && cols[i] == nodeToDelete)
{
exists = true;
break;
}
}
if (exists)
{
        // Now let's set all of them to false and update the histogram atomically
for (int i = dataStart; i < dataEnd; i++)
{
data[i] = false;
atomicSub(&histogram[cols[i]], 1);
}
}
}
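// Greedy seed selection over sampled RR sets: the sets are generated in batches on the GPU,
// nodeHistogram counts how many sets each node appears in, and the k seeds are chosen by
// repeatedly taking the most frequent node and removing (via update_counts) the sets it covers.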
unordered_set<int> nodeSelection(CSR<float> *graph, int k, double theta)
{
unordered_set<int>::iterator it;
unordered_set<int> seeds;
map<int, unordered_set<int>> R;
float *deviceDataFloat;
bool *deviceDataBool;
int *deviceRows;
int *deviceCols;
int *deviceNodeHistogram;
int *hostNodeHistogram;
bool *deviceProcessedRows;
bool *hostProcessedRows;
curandState *deviceStates;
// Initialize data, rows, and cols
int sizeOfData = graph->data.size() * sizeof(float);
int sizeOfRows = graph->rows.size() * sizeof(int);
int sizeOfCols = graph->cols.size() * sizeof(int);
CUDA_CHECK(cudaMalloc((void **)&deviceDataFloat, sizeOfData));
CUDA_CHECK(cudaMalloc((void **)&deviceRows, sizeOfRows));
CUDA_CHECK(cudaMalloc((void **)&deviceCols, sizeOfCols));
CUDA_CHECK(cudaMemcpy(deviceDataFloat, &(graph->data[0]), sizeOfData, cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMemcpy(deviceRows, &(graph->rows[0]), sizeOfRows, cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMemcpy(deviceCols, &(graph->cols[0]), sizeOfCols, cudaMemcpyHostToDevice));
// Initialize output of kernel
int numNodes = (int)graph->rows.size() - 1;
int sizeOfNodeHistogram = numNodes * sizeof(int);
CUDA_CHECK(cudaMalloc((void **)&deviceNodeHistogram, sizeOfNodeHistogram));
CUDA_CHECK(cudaMemset(deviceNodeHistogram, 0, sizeOfNodeHistogram));
hostNodeHistogram = (int*) malloc(sizeOfNodeHistogram);
// Calculate number of batches
int numBatches = ceil(theta / NUM_ROWS_PER_BATCH);
// Initialize processed rows output
long long int sizeOfProcessedRows = sizeof(bool) * NUM_ROWS_PER_BATCH * numNodes;
CUDA_CHECK(cudaMalloc((void **)&deviceProcessedRows, sizeOfProcessedRows));
hostProcessedRows = (bool *)malloc(sizeOfProcessedRows);
// Initialize RNG States
CUDA_CHECK(cudaMalloc((void **)&deviceStates, NUM_ROWS_PER_BATCH * sizeof(curandState)));
dim3 dimGrid((NUM_ROWS_PER_BATCH / BLOCK_SIZE) + 1, 1, 1);
dim3 dimBlock(BLOCK_SIZE, 1, 1);
init_rng<<<dimGrid, dimBlock>>>(NUM_ROWS_PER_BATCH, deviceStates, 1, 0);
CUDA_CHECK(cudaPeekAtLastError());
CUDA_CHECK(cudaDeviceSynchronize());
// Process batches
int numRowsProcessed = 0;
// Can't use bool: https://stackoverflow.com/questions/8399417/why-vectorboolreference-doesnt-return-reference-to-bool
CSR<char> *processedRows = new CSR<char>();
for (int i = 0; i < numBatches; i++)
{
// Reset processed rows for new kernel
CUDA_CHECK(cudaMemset(deviceProcessedRows, false, sizeOfProcessedRows));
// Process the minimum number of rows
int numRowsToProcess = min(NUM_ROWS_PER_BATCH, (int)ceil(theta) - numRowsProcessed);
// Launch RR generation kernel
dimGrid = dim3(ceil(float(numRowsToProcess) / BLOCK_SIZE), 1, 1);
dimBlock = dim3(BLOCK_SIZE, 1, 1);
generate_rr_sets<<<dimGrid, dimBlock>>>(deviceDataFloat, deviceRows, deviceCols, deviceProcessedRows, deviceNodeHistogram, numNodes, numRowsToProcess, deviceStates);
CUDA_CHECK(cudaPeekAtLastError());
CUDA_CHECK(cudaDeviceSynchronize());
// Add to our running processedRows CSR using the rows and cols members
CUDA_CHECK(cudaMemcpy(hostProcessedRows, deviceProcessedRows, sizeOfProcessedRows, cudaMemcpyDeviceToHost));
for (int j = 0; j < numRowsToProcess; j++)
{
processedRows->rows.push_back(processedRows->data.size());
for (int k = 0; k < numNodes; k++)
{
if (hostProcessedRows[(unsigned long long int)j * numNodes + k])
{
processedRows->data.push_back(true);
processedRows->cols.push_back(k);
}
}
}
processedRows->rows.push_back(processedRows->data.size());
numRowsProcessed += numRowsToProcess;
}
// Copy our processedRows CSR to device
CUDA_CHECK(cudaFree(deviceRows));
CUDA_CHECK(cudaFree(deviceCols));
CUDA_CHECK(cudaFree(deviceDataFloat));
sizeOfData = processedRows->data.size() * sizeof(char);
sizeOfRows = processedRows->rows.size() * sizeof(int);
sizeOfCols = processedRows->cols.size() * sizeof(int);
CUDA_CHECK(cudaMalloc((void **)&deviceDataBool, sizeOfData));
CUDA_CHECK(cudaMalloc((void **)&deviceRows, sizeOfRows));
CUDA_CHECK(cudaMalloc((void **)&deviceCols, sizeOfCols));
CUDA_CHECK(cudaMemcpy(deviceDataBool, &(processedRows->data[0]), sizeOfData, cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMemcpy(deviceRows, &(processedRows->rows[0]), sizeOfRows, cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMemcpy(deviceCols, &(processedRows->cols[0]), sizeOfCols, cudaMemcpyHostToDevice));
// Initialize dimensions for count updating
unsigned int mostCommonNode;
dimGrid = dim3(ceil(float(numRowsProcessed) / BLOCK_SIZE), 1, 1);
dimBlock = dim3(BLOCK_SIZE, 1, 1);
// Copy out node histogram to host
CUDA_CHECK(cudaMemcpy(hostNodeHistogram, deviceNodeHistogram, sizeOfNodeHistogram, cudaMemcpyDeviceToHost));
// Select nodes using histogram and processedRows CSR
for (int j = 0; j < k - 1; j++)
{
mostCommonNode = maxIndex(hostNodeHistogram, numNodes);
seeds.insert(mostCommonNode);
update_counts<<<dimGrid, dimBlock>>>(deviceDataBool, deviceRows, deviceCols, deviceNodeHistogram, numRowsProcessed, numNodes, mostCommonNode);
CUDA_CHECK(cudaPeekAtLastError());
CUDA_CHECK(cudaDeviceSynchronize());
CUDA_CHECK(cudaMemcpy(hostNodeHistogram, deviceNodeHistogram, sizeOfNodeHistogram, cudaMemcpyDeviceToHost));
}
mostCommonNode = maxIndex(hostNodeHistogram, numNodes);
seeds.insert(mostCommonNode);
CUDA_CHECK(cudaFree(deviceDataBool));
CUDA_CHECK(cudaFree(deviceRows));
CUDA_CHECK(cudaFree(deviceCols));
CUDA_CHECK(cudaFree(deviceProcessedRows));
CUDA_CHECK(cudaFree(deviceStates));
free(hostProcessedRows);
delete processedRows;
return seeds;
}
int main(int argc, char **argv)
{
Benchmark b;
b.setNodeSelectionFunction(nodeSelection);
b.run();
return 0;
}
|
c2f108e81f6b65fc40ed1bb3984230b1209916a4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Created by 唐艺峰 on 2018/8/7.
//
#include "gpu_chunk_reduction.h"
__device__ void
gpu_chunk_local_reduction(gpu_matrix *matrix, dimension max_dim, ScatterAllocator::AllocatorHandle *allocator) {
int thread_id = threadIdx.x + blockDim.x * blockIdx.x;
int block_id = blockIdx.x;
indx chunk_start = matrix->chunks_start_offset[block_id]; // ID of column who starts the chunk.
indx chunk_end = matrix->chunks_start_offset[block_id + 1]; // ID of column who ends the chunk.
indx chunk_size = chunk_end - chunk_start; // Size of the chunk.
indx target_col = -1;
bool ive_added; // To make sure each column will only increase the chunk_finish_col_num once
indx left_chunk_size = block_id != 0 ?
matrix->chunks_start_offset[block_id] - matrix->chunks_start_offset[block_id - 1] : 0;
for (dimension cur_dim = max_dim; cur_dim >= 1; cur_dim--) {
ive_added = false;
do {
gpu_check_lowest_one_locally(matrix, thread_id, block_id, chunk_start, chunk_start, cur_dim, &target_col, &ive_added);
gpu_add_two_cols_locally(matrix, thread_id, target_col, allocator);
target_col = -1;
__syncthreads(); // In fact, we don't even need this barrier but it helps us to improve efficiency by cutting down the number of loops.
} while (matrix->chunk_finish_col_num[block_id] < (block_id != 0 ? chunk_size * ((max_dim - cur_dim) * 2 + 1) : chunk_size * (max_dim - cur_dim + 1)));
            // If all of the columns have finished this round, we can leave the loop and move on to the next step.
            // chunk_finish_col_num is incremented at most once per column per round (see ive_added).
gpu_mark_and_clean_locally(matrix, thread_id, chunk_start, cur_dim);
if (block_id != 0) { // Chunk 0 has no left neighbor who won't run following codes.
while (matrix->chunk_finish_col_num[block_id - 1] < (block_id != 1 ?
left_chunk_size * ((max_dim - cur_dim) * 2 + 1) :
left_chunk_size * (max_dim - cur_dim + 1))) {
                __threadfence(); // Wait until the left neighbor has finished its work.
}
ive_added = false;
do {
gpu_check_lowest_one_locally(matrix, thread_id, block_id, chunk_start, chunk_start - 1, cur_dim, &target_col, &ive_added);
gpu_add_two_cols_locally(matrix, thread_id, target_col, allocator);
target_col = -1;
__syncthreads();
} while (matrix->chunk_finish_col_num[block_id] < chunk_size * (max_dim - cur_dim + 1) * 2);
gpu_mark_and_clean_locally(matrix, thread_id, chunk_start - 1, cur_dim);
}
}
matrix->data_length[thread_id] = (indx) matrix->data[thread_id].data_length;
}
__device__ void gpu_mark_active_column(gpu_matrix *matrix, dimension max_dim) {
int thread_id = threadIdx.x + blockDim.x * blockIdx.x;
int block_id = blockIdx.x;
indx chunk_start = matrix->chunks_start_offset[block_id];
indx chunk_end = matrix->chunks_start_offset[block_id + 1];
indx chunk_size = chunk_end - chunk_start;
// Please see the paper to know what happens below
// https://arxiv.org/pdf/1303.0477.pdf
bool im_done = false;
indx cur_row_idx = 0;
auto col = &matrix->data[thread_id];
do {
if (!im_done) {
if (mx_is_empty(matrix, thread_id) || cur_row_idx == col->data_length || matrix->column_type[thread_id] == GLOBAL) {
im_done = true;
matrix->is_active[thread_id] = matrix->column_type[thread_id] == GLOBAL;
matrix->is_ready_for_mark[thread_id] = true;
atomicAdd((unsigned long long *) &matrix->chunk_finish_col_num[block_id], (unsigned long long) 1);
} else {
indx cur_row = col->data[cur_row_idx];
if (matrix->column_type[cur_row] == GLOBAL) {
im_done = true;
matrix->is_active[thread_id] = true;
matrix->is_ready_for_mark[thread_id] = true;
atomicAdd((unsigned long long *) &matrix->chunk_finish_col_num[block_id], (unsigned long long) 1);
} else if (matrix->column_type[cur_row] == LOCAL_POSITIVE) {
indx cur_row_lowest_one = matrix->lowest_one_lookup[cur_row];
if (cur_row_lowest_one == thread_id || cur_row_lowest_one == -1) {
cur_row_idx++;
} else if (matrix->is_ready_for_mark[cur_row_lowest_one]) {
if (matrix->is_active[cur_row_lowest_one]) {
im_done = true;
matrix->is_active[thread_id] = true;
matrix->is_ready_for_mark[thread_id] = true;
atomicAdd((unsigned long long *) &matrix->chunk_finish_col_num[block_id], (unsigned long long) 1);
} else {
cur_row_idx++;
}
}
} else {
cur_row_idx++;
}
}
}
__syncthreads();
//if(thread_id == 0)
// printf("chunk_finish_col_num is %ld chunk_size is %ld max dim %ld\n", matrix->chunk_finish_col_num[block_id], chunk_size, max_dim);
} while (matrix->chunk_finish_col_num[block_id] < ((block_id == 0) ? (max_dim + 1) * chunk_size : (2 * max_dim + 1) * chunk_size));
// To exit when all columns have done their work
}
__device__ void gpu_add_two_cols_locally(gpu_matrix *matrix, indx my_col_id, indx target_col,
ScatterAllocator::AllocatorHandle *allocator) {
if (target_col == -1 || matrix->column_type[my_col_id] != GLOBAL || target_col == my_col_id) {
return;
}
mx_add_to(matrix, target_col, my_col_id, allocator);
}
__device__ void
gpu_check_lowest_one_locally(gpu_matrix *matrix, indx my_col_id, indx block_id, indx chunk_start, indx row_begin,
dimension cur_dim, indx *target_col, bool * ive_added) {
if (cur_dim != mx_get_dim(matrix, my_col_id) || matrix->column_type[my_col_id] != GLOBAL) {
if (!*ive_added) {
atomicAdd((unsigned long long *) &matrix->chunk_finish_col_num[block_id], (unsigned long long) 1);
*ive_added = true;
}
return;
}
indx my_lowest_one = mx_get_max_indx(matrix, my_col_id);
if (my_lowest_one >= row_begin) {
for (indx col_id = chunk_start; col_id < my_col_id; col_id++) {
indx this_lowest_one = mx_get_max_indx(matrix, col_id);
if (this_lowest_one == my_lowest_one) {
*target_col = col_id;
if (*ive_added) {
atomicAdd((unsigned long long *) &matrix->chunk_finish_col_num[block_id], (unsigned long long) -1);
// If a column has been set to unfinished again, it decrease the chunk_finish_col_num to let the loop go on
*ive_added = false;
}
return;
}
}
}
if (!*ive_added) {
atomicAdd((unsigned long long *) &matrix->chunk_finish_col_num[block_id], (unsigned long long) 1);
*ive_added = true;
}
}
__device__ void gpu_mark_and_clean_locally(gpu_matrix *matrix, indx my_col_id, indx row_begin,
dimension cur_dim) {
if (cur_dim != mx_get_dim(matrix, my_col_id) || matrix->column_type[my_col_id] != GLOBAL) {
return;
}
indx my_lowest_one = mx_get_max_indx(matrix, my_col_id);
if (matrix->lowest_one_lookup[my_lowest_one] == -1 && my_lowest_one >= row_begin) {
matrix->lowest_one_lookup[my_lowest_one] = my_col_id;
matrix->column_type[my_col_id] = LOCAL_NEGATIVE;
matrix->column_type[my_lowest_one] = LOCAL_POSITIVE;
mx_clear(matrix, my_lowest_one);
}
}
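// Compacts a GLOBAL column in place: LOCAL_NEGATIVE rows are dropped, LOCAL_POSITIVE rows are
// either resolved by adding their paired (lowest-one) column when it is active or dropped, and
// the surviving rows are collected on a temporary stack and written back in ascending order.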
__device__ void gpu_column_simplification(gpu_matrix *matrix, indx my_col_id, ScatterAllocator::AllocatorHandle *allocator) {
if (matrix->column_type[my_col_id] != GLOBAL) {
matrix->is_ready_for_mark[my_col_id] = true;
return;
}
auto col = &matrix->data[my_col_id];
auto stack = initial_gpu_stack(col->data_length);
while(!mx_is_empty(matrix, my_col_id)) {
indx cur_row = mx_get_max_indx(matrix, my_col_id);
if (matrix->column_type[cur_row] == LOCAL_NEGATIVE) {
mx_remove_max(matrix, my_col_id);
} else if (matrix->column_type[cur_row] == LOCAL_POSITIVE) {
if (matrix->is_ready_for_mark[matrix->lowest_one_lookup[cur_row]]) {
if (matrix->is_active[matrix->lowest_one_lookup[cur_row]]) {
mx_add_to(matrix, matrix->lowest_one_lookup[cur_row], my_col_id, allocator);
} else {
mx_remove_max(matrix, my_col_id);
}
}
} else {
gpu_stack_push(stack, cur_row);
mx_remove_max(matrix, my_col_id);
}
}
if (stack->data_length > col->size) {
auto new_size = round_up_to_2s(stack->data_length);
auto new_data = (indx *) allocator->malloc(sizeof(indx) * new_size);
allocator->free(col->data);
col->data = new_data;
col->size = (size_t) new_size;
}
for (indx i = 0; i < stack->data_length; i++) {
col->data[stack->data_length - 1 - i] = stack->data[i];
}
col->data_length = stack->data_length;
matrix->is_ready_for_mark[my_col_id] = true;
__threadfence();
delete[] stack->data;
delete stack;
}
__device__ gpu_stack * initial_gpu_stack(size_t length) {
auto data_size = round_up_to_2s(length);
auto stack = new gpu_stack;
stack->data = new indx[data_size];
stack->data_length = 0;
stack->size = (size_t) data_size;
return stack;
}
__device__ void gpu_stack_push(gpu_stack * stack, indx elem) {
gpu_resize(stack, stack->data_length + 1);
stack->data[stack->data_length] = elem;
stack->data_length++;
}
__device__ void gpu_resize(gpu_stack * stack, size_t new_size) {
if (new_size > stack->size) {
auto new_data = new indx[stack->size * 2];
memcpy(new_data, stack->data, sizeof(indx) * stack->size);
delete[] stack->data;
stack->data = new_data;
stack->size = stack->size * 2;
}
} | c2f108e81f6b65fc40ed1bb3984230b1209916a4.cu | //
// Created by 唐艺峰 on 2018/8/7.
//
#include "gpu_chunk_reduction.h"
__device__ void
gpu_chunk_local_reduction(gpu_matrix *matrix, dimension max_dim, ScatterAllocator::AllocatorHandle *allocator) {
int thread_id = threadIdx.x + blockDim.x * blockIdx.x;
int block_id = blockIdx.x;
indx chunk_start = matrix->chunks_start_offset[block_id]; // ID of column who starts the chunk.
indx chunk_end = matrix->chunks_start_offset[block_id + 1]; // ID of column who ends the chunk.
indx chunk_size = chunk_end - chunk_start; // Size of the chunk.
indx target_col = -1;
bool ive_added; // To make sure each column will only increase the chunk_finish_col_num once
indx left_chunk_size = block_id != 0 ?
matrix->chunks_start_offset[block_id] - matrix->chunks_start_offset[block_id - 1] : 0;
for (dimension cur_dim = max_dim; cur_dim >= 1; cur_dim--) {
ive_added = false;
do {
gpu_check_lowest_one_locally(matrix, thread_id, block_id, chunk_start, chunk_start, cur_dim, &target_col, &ive_added);
gpu_add_two_cols_locally(matrix, thread_id, target_col, allocator);
target_col = -1;
__syncthreads(); // In fact, we don't even need this barrier but it helps us to improve efficiency by cutting down the number of loops.
} while (matrix->chunk_finish_col_num[block_id] < (block_id != 0 ? chunk_size * ((max_dim - cur_dim) * 2 + 1) : chunk_size * (max_dim - cur_dim + 1)));
            // Leave the loop once every column in the chunk has finished the current pass;
            // chunk_finish_col_num is incremented at most once per column per pass.
gpu_mark_and_clean_locally(matrix, thread_id, chunk_start, cur_dim);
if (block_id != 0) { // Chunk 0 has no left neighbor who won't run following codes.
while (matrix->chunk_finish_col_num[block_id - 1] < (block_id != 1 ?
left_chunk_size * ((max_dim - cur_dim) * 2 + 1) :
left_chunk_size * (max_dim - cur_dim + 1))) {
                __threadfence();    // spin until the left-neighbour chunk has finished its current pass
}
ive_added = false;
do {
gpu_check_lowest_one_locally(matrix, thread_id, block_id, chunk_start, chunk_start - 1, cur_dim, &target_col, &ive_added);
gpu_add_two_cols_locally(matrix, thread_id, target_col, allocator);
target_col = -1;
__syncthreads();
} while (matrix->chunk_finish_col_num[block_id] < chunk_size * (max_dim - cur_dim + 1) * 2);
gpu_mark_and_clean_locally(matrix, thread_id, chunk_start - 1, cur_dim);
}
}
matrix->data_length[thread_id] = (indx) matrix->data[thread_id].data_length;
}
__device__ void gpu_mark_active_column(gpu_matrix *matrix, dimension max_dim) {
int thread_id = threadIdx.x + blockDim.x * blockIdx.x;
int block_id = blockIdx.x;
indx chunk_start = matrix->chunks_start_offset[block_id];
indx chunk_end = matrix->chunks_start_offset[block_id + 1];
indx chunk_size = chunk_end - chunk_start;
// Please see the paper to know what happens below
// https://arxiv.org/pdf/1303.0477.pdf
bool im_done = false;
indx cur_row_idx = 0;
auto col = &matrix->data[thread_id];
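    // Scan this column's rows in increasing order: the column becomes "active" as soon as it
    // is GLOBAL itself or reaches a row owned by a GLOBAL column (or by a LOCAL_POSITIVE pair
    // that turned out to be active); rows owned by LOCAL_NEGATIVE columns, or by pairs that
    // turned out inactive, are skipped.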
do {
if (!im_done) {
if (mx_is_empty(matrix, thread_id) || cur_row_idx == col->data_length || matrix->column_type[thread_id] == GLOBAL) {
im_done = true;
matrix->is_active[thread_id] = matrix->column_type[thread_id] == GLOBAL;
matrix->is_ready_for_mark[thread_id] = true;
atomicAdd((unsigned long long *) &matrix->chunk_finish_col_num[block_id], (unsigned long long) 1);
} else {
indx cur_row = col->data[cur_row_idx];
if (matrix->column_type[cur_row] == GLOBAL) {
im_done = true;
matrix->is_active[thread_id] = true;
matrix->is_ready_for_mark[thread_id] = true;
atomicAdd((unsigned long long *) &matrix->chunk_finish_col_num[block_id], (unsigned long long) 1);
} else if (matrix->column_type[cur_row] == LOCAL_POSITIVE) {
indx cur_row_lowest_one = matrix->lowest_one_lookup[cur_row];
if (cur_row_lowest_one == thread_id || cur_row_lowest_one == -1) {
cur_row_idx++;
} else if (matrix->is_ready_for_mark[cur_row_lowest_one]) {
if (matrix->is_active[cur_row_lowest_one]) {
im_done = true;
matrix->is_active[thread_id] = true;
matrix->is_ready_for_mark[thread_id] = true;
atomicAdd((unsigned long long *) &matrix->chunk_finish_col_num[block_id], (unsigned long long) 1);
} else {
cur_row_idx++;
}
}
} else {
cur_row_idx++;
}
}
}
__syncthreads();
//if(thread_id == 0)
// printf("chunk_finish_col_num is %ld chunk_size is %ld max dim %ld\n", matrix->chunk_finish_col_num[block_id], chunk_size, max_dim);
} while (matrix->chunk_finish_col_num[block_id] < ((block_id == 0) ? (max_dim + 1) * chunk_size : (2 * max_dim + 1) * chunk_size));
// To exit when all columns have done their work
}
__device__ void gpu_add_two_cols_locally(gpu_matrix *matrix, indx my_col_id, indx target_col,
ScatterAllocator::AllocatorHandle *allocator) {
if (target_col == -1 || matrix->column_type[my_col_id] != GLOBAL || target_col == my_col_id) {
return;
}
mx_add_to(matrix, target_col, my_col_id, allocator);
}
__device__ void
gpu_check_lowest_one_locally(gpu_matrix *matrix, indx my_col_id, indx block_id, indx chunk_start, indx row_begin,
dimension cur_dim, indx *target_col, bool * ive_added) {
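    // Look for a column to the left of mine (within [chunk_start, my_col_id)) whose lowest
    // one matches my own; if one is found, report it through *target_col and flag this column
    // as unfinished again, otherwise count this column as finished for the current pass.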
if (cur_dim != mx_get_dim(matrix, my_col_id) || matrix->column_type[my_col_id] != GLOBAL) {
if (!*ive_added) {
atomicAdd((unsigned long long *) &matrix->chunk_finish_col_num[block_id], (unsigned long long) 1);
*ive_added = true;
}
return;
}
indx my_lowest_one = mx_get_max_indx(matrix, my_col_id);
if (my_lowest_one >= row_begin) {
for (indx col_id = chunk_start; col_id < my_col_id; col_id++) {
indx this_lowest_one = mx_get_max_indx(matrix, col_id);
if (this_lowest_one == my_lowest_one) {
*target_col = col_id;
if (*ive_added) {
atomicAdd((unsigned long long *) &matrix->chunk_finish_col_num[block_id], (unsigned long long) -1);
// If a column has been set to unfinished again, it decrease the chunk_finish_col_num to let the loop go on
*ive_added = false;
}
return;
}
}
}
if (!*ive_added) {
atomicAdd((unsigned long long *) &matrix->chunk_finish_col_num[block_id], (unsigned long long) 1);
*ive_added = true;
}
}
__device__ void gpu_mark_and_clean_locally(gpu_matrix *matrix, indx my_col_id, indx row_begin,
dimension cur_dim) {
if (cur_dim != mx_get_dim(matrix, my_col_id) || matrix->column_type[my_col_id] != GLOBAL) {
return;
}
indx my_lowest_one = mx_get_max_indx(matrix, my_col_id);
if (matrix->lowest_one_lookup[my_lowest_one] == -1 && my_lowest_one >= row_begin) {
matrix->lowest_one_lookup[my_lowest_one] = my_col_id;
matrix->column_type[my_col_id] = LOCAL_NEGATIVE;
matrix->column_type[my_lowest_one] = LOCAL_POSITIVE;
mx_clear(matrix, my_lowest_one);
}
}
__device__ void gpu_column_simplification(gpu_matrix *matrix, indx my_col_id, ScatterAllocator::AllocatorHandle *allocator) {
if (matrix->column_type[my_col_id] != GLOBAL) {
matrix->is_ready_for_mark[my_col_id] = true;
return;
}
auto col = &matrix->data[my_col_id];
auto stack = initial_gpu_stack(col->data_length);
while(!mx_is_empty(matrix, my_col_id)) {
indx cur_row = mx_get_max_indx(matrix, my_col_id);
if (matrix->column_type[cur_row] == LOCAL_NEGATIVE) {
mx_remove_max(matrix, my_col_id);
} else if (matrix->column_type[cur_row] == LOCAL_POSITIVE) {
if (matrix->is_ready_for_mark[matrix->lowest_one_lookup[cur_row]]) {
if (matrix->is_active[matrix->lowest_one_lookup[cur_row]]) {
mx_add_to(matrix, matrix->lowest_one_lookup[cur_row], my_col_id, allocator);
} else {
mx_remove_max(matrix, my_col_id);
}
}
} else {
gpu_stack_push(stack, cur_row);
mx_remove_max(matrix, my_col_id);
}
}
if (stack->data_length > col->size) {
auto new_size = round_up_to_2s(stack->data_length);
auto new_data = (indx *) allocator->malloc(sizeof(indx) * new_size);
allocator->free(col->data);
col->data = new_data;
col->size = (size_t) new_size;
}
for (indx i = 0; i < stack->data_length; i++) {
col->data[stack->data_length - 1 - i] = stack->data[i];
}
col->data_length = stack->data_length;
matrix->is_ready_for_mark[my_col_id] = true;
__threadfence();
delete[] stack->data;
delete stack;
}
__device__ gpu_stack * initial_gpu_stack(size_t length) {
auto data_size = round_up_to_2s(length);
auto stack = new gpu_stack;
stack->data = new indx[data_size];
stack->data_length = 0;
stack->size = (size_t) data_size;
return stack;
}
__device__ void gpu_stack_push(gpu_stack * stack, indx elem) {
gpu_resize(stack, stack->data_length + 1);
stack->data[stack->data_length] = elem;
stack->data_length++;
}
__device__ void gpu_resize(gpu_stack * stack, size_t new_size) {
if (new_size > stack->size) {
auto new_data = new indx[stack->size * 2];
memcpy(new_data, stack->data, sizeof(indx) * stack->size);
delete[] stack->data;
stack->data = new_data;
stack->size = stack->size * 2;
}
}
|
1e5b36f499cf79c0e733d5d326a40874ddba9ce0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Facebook, Inc. and its affiliates.
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h>
#include <ATen/hip/HIPApplyUtils.cuh>
/*#ifdef WITH_CUDA
#include "../box_iou_rotated/box_iou_rotated_utils.h"
#endif
// TODO avoid this when pytorch supports "same directory" hipification
#ifdef WITH_HIP
#include "box_iou_rotated/box_iou_rotated_utils.h"
#endif*/
#include "box_iou_rotated/box_iou_rotated_utils.h"
using namespace detectron2;
namespace {
int const threadsPerBlock = sizeof(unsigned long long) * 8;
}
template <typename T>
__global__ void nms_rotated_cuda_kernel(
const int n_boxes,
const float iou_threshold,
const T* dev_boxes,
unsigned long long* dev_mask) {
// nms_rotated_cuda_kernel is modified from torchvision's nms_cuda_kernel
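  // Each block compares one 64-box "row" block against one 64-box "column" block of the
  // score-sorted boxes: bit j of dev_mask[i * col_blocks + col_start] is set when box i
  // overlaps box (col_start * 64 + j) with an IoU above iou_threshold.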
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
// if (row_start > col_start) return;
const int row_size =
min(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
const int col_size =
min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);
// Compared to nms_cuda_kernel, where each box is represented with 4 values
// (x1, y1, x2, y2), each rotated box is represented with 5 values
// (x_center, y_center, width, height, angle_degrees) here.
__shared__ T block_boxes[threadsPerBlock * 5];
if (threadIdx.x < col_size) {
block_boxes[threadIdx.x * 5 + 0] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0];
block_boxes[threadIdx.x * 5 + 1] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1];
block_boxes[threadIdx.x * 5 + 2] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2];
block_boxes[threadIdx.x * 5 + 3] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3];
block_boxes[threadIdx.x * 5 + 4] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4];
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
const T* cur_box = dev_boxes + cur_box_idx * 5;
int i = 0;
unsigned long long t = 0;
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i++) {
// Instead of devIoU used by original horizontal nms, here
// we use the single_box_iou_rotated function from box_iou_rotated_utils.h
if (single_box_iou_rotated<T>(cur_box, block_boxes + i * 5) >
iou_threshold) {
t |= 1ULL << i;
}
}
const int col_blocks = at::cuda::ATenCeilDiv(n_boxes, threadsPerBlock);
dev_mask[cur_box_idx * col_blocks + col_start] = t;
}
}
namespace detectron2 {
at::Tensor nms_rotated_cuda(
// input must be contiguous
const at::Tensor& dets,
const at::Tensor& scores,
float iou_threshold) {
// using scalar_t = float;
AT_ASSERTM(dets.is_cuda(), "dets must be a CUDA tensor");
AT_ASSERTM(scores.is_cuda(), "scores must be a CUDA tensor");
at::hip::HIPGuardMasqueradingAsCUDA device_guard(dets.device());
auto order_t = std::get<1>(scores.sort(0, /* descending=*/true));
auto dets_sorted = dets.index_select(0, order_t);
auto dets_num = dets.size(0);
const int col_blocks =
at::cuda::ATenCeilDiv(static_cast<int>(dets_num), threadsPerBlock);
at::Tensor mask =
at::empty({dets_num * col_blocks}, dets.options().dtype(at::kLong));
dim3 blocks(col_blocks, col_blocks);
dim3 threads(threadsPerBlock);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AT_DISPATCH_FLOATING_TYPES(
dets_sorted.scalar_type(), "nms_rotated_kernel_cuda", [&] {
hipLaunchKernelGGL(( nms_rotated_cuda_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, stream,
dets_num,
iou_threshold,
dets_sorted.data_ptr<scalar_t>(),
(unsigned long long*)mask.data_ptr<int64_t>());
});
at::Tensor mask_cpu = mask.to(at::kCPU);
unsigned long long* mask_host =
(unsigned long long*)mask_cpu.data_ptr<int64_t>();
std::vector<unsigned long long> remv(col_blocks);
memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks);
at::Tensor keep =
at::empty({dets_num}, dets.options().dtype(at::kLong).device(at::kCPU));
int64_t* keep_out = keep.data_ptr<int64_t>();
int num_to_keep = 0;
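  // Greedy suppression on the host: walk the boxes in descending score order, keep a box if
  // no previously kept box has suppressed it, then OR its mask row into remv so that every
  // box it overlaps is skipped later.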
for (int i = 0; i < dets_num; i++) {
int nblock = i / threadsPerBlock;
int inblock = i % threadsPerBlock;
if (!(remv[nblock] & (1ULL << inblock))) {
keep_out[num_to_keep++] = i;
unsigned long long* p = mask_host + i * col_blocks;
for (int j = nblock; j < col_blocks; j++) {
remv[j] |= p[j];
}
}
}
AT_CUDA_CHECK(hipGetLastError());
return order_t.index(
{keep.narrow(/*dim=*/0, /*start=*/0, /*length=*/num_to_keep)
.to(order_t.device(), keep.scalar_type())});
}
} // namespace detectron2
| 1e5b36f499cf79c0e733d5d326a40874ddba9ce0.cu | // Copyright (c) Facebook, Inc. and its affiliates.
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <c10/cuda/CUDAGuard.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
/*#ifdef WITH_CUDA
#include "../box_iou_rotated/box_iou_rotated_utils.h"
#endif
// TODO avoid this when pytorch supports "same directory" hipification
#ifdef WITH_HIP
#include "box_iou_rotated/box_iou_rotated_utils.h"
#endif*/
#include "box_iou_rotated/box_iou_rotated_utils.h"
using namespace detectron2;
namespace {
int const threadsPerBlock = sizeof(unsigned long long) * 8;
}
template <typename T>
__global__ void nms_rotated_cuda_kernel(
const int n_boxes,
const float iou_threshold,
const T* dev_boxes,
unsigned long long* dev_mask) {
// nms_rotated_cuda_kernel is modified from torchvision's nms_cuda_kernel
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
// if (row_start > col_start) return;
const int row_size =
min(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
const int col_size =
min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);
// Compared to nms_cuda_kernel, where each box is represented with 4 values
// (x1, y1, x2, y2), each rotated box is represented with 5 values
// (x_center, y_center, width, height, angle_degrees) here.
__shared__ T block_boxes[threadsPerBlock * 5];
if (threadIdx.x < col_size) {
block_boxes[threadIdx.x * 5 + 0] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0];
block_boxes[threadIdx.x * 5 + 1] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1];
block_boxes[threadIdx.x * 5 + 2] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2];
block_boxes[threadIdx.x * 5 + 3] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3];
block_boxes[threadIdx.x * 5 + 4] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4];
}
__syncthreads();
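  // Make sure the whole column block of boxes is staged in shared memory before any thread
  // starts computing IoUs against it.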
if (threadIdx.x < row_size) {
const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
const T* cur_box = dev_boxes + cur_box_idx * 5;
int i = 0;
unsigned long long t = 0;
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i++) {
// Instead of devIoU used by original horizontal nms, here
// we use the single_box_iou_rotated function from box_iou_rotated_utils.h
if (single_box_iou_rotated<T>(cur_box, block_boxes + i * 5) >
iou_threshold) {
t |= 1ULL << i;
}
}
const int col_blocks = at::cuda::ATenCeilDiv(n_boxes, threadsPerBlock);
dev_mask[cur_box_idx * col_blocks + col_start] = t;
}
}
namespace detectron2 {
at::Tensor nms_rotated_cuda(
// input must be contiguous
const at::Tensor& dets,
const at::Tensor& scores,
float iou_threshold) {
// using scalar_t = float;
AT_ASSERTM(dets.is_cuda(), "dets must be a CUDA tensor");
AT_ASSERTM(scores.is_cuda(), "scores must be a CUDA tensor");
at::cuda::CUDAGuard device_guard(dets.device());
auto order_t = std::get<1>(scores.sort(0, /* descending=*/true));
auto dets_sorted = dets.index_select(0, order_t);
auto dets_num = dets.size(0);
const int col_blocks =
at::cuda::ATenCeilDiv(static_cast<int>(dets_num), threadsPerBlock);
at::Tensor mask =
at::empty({dets_num * col_blocks}, dets.options().dtype(at::kLong));
dim3 blocks(col_blocks, col_blocks);
dim3 threads(threadsPerBlock);
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
AT_DISPATCH_FLOATING_TYPES(
dets_sorted.scalar_type(), "nms_rotated_kernel_cuda", [&] {
nms_rotated_cuda_kernel<scalar_t><<<blocks, threads, 0, stream>>>(
dets_num,
iou_threshold,
dets_sorted.data_ptr<scalar_t>(),
(unsigned long long*)mask.data_ptr<int64_t>());
});
at::Tensor mask_cpu = mask.to(at::kCPU);
unsigned long long* mask_host =
(unsigned long long*)mask_cpu.data_ptr<int64_t>();
std::vector<unsigned long long> remv(col_blocks);
memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks);
at::Tensor keep =
at::empty({dets_num}, dets.options().dtype(at::kLong).device(at::kCPU));
int64_t* keep_out = keep.data_ptr<int64_t>();
int num_to_keep = 0;
for (int i = 0; i < dets_num; i++) {
int nblock = i / threadsPerBlock;
int inblock = i % threadsPerBlock;
if (!(remv[nblock] & (1ULL << inblock))) {
keep_out[num_to_keep++] = i;
unsigned long long* p = mask_host + i * col_blocks;
for (int j = nblock; j < col_blocks; j++) {
remv[j] |= p[j];
}
}
}
AT_CUDA_CHECK(cudaGetLastError());
return order_t.index(
{keep.narrow(/*dim=*/0, /*start=*/0, /*length=*/num_to_keep)
.to(order_t.device(), keep.scalar_type())});
}
} // namespace detectron2
|
783a24dba56847a4a85b5fc6725fd09c9eea5580.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
This file is part of ParTI!.
ParTI! is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
ParTI! is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with ParTI!.
If not, see <http://www.gnu.org/licenses/>.
*/
#include <HiParTI.h>
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include "sptensor.h"
#include "mmul_cuda_kernels.h"
int ptiCudaSparseTensorMulMatrix(
ptiSemiSparseTensor *Y,
ptiSparseTensor *X,
const ptiMatrix *U,
ptiIndex const mode
) {
int result;
ptiIndex *ind_buf;
ptiIndex m;
ptiNnzIndexVector fiberidx;
if(mode >= X->nmodes) {
pti_CheckError(PTIERR_SHAPE_MISMATCH, "CUDA SpTns * Mtx", "shape mismatch");
}
if(X->ndims[mode] != U->nrows) {
pti_CheckError(PTIERR_SHAPE_MISMATCH, "CUDA SpTns * Mtx", "shape mismatch");
}
ptiSparseTensorSortIndexAtMode(X, mode, 0);
    ind_buf = new ptiIndex[X->nmodes];
for(m = 0; m < X->nmodes; ++m) {
ind_buf[m] = X->ndims[m];
}
ind_buf[mode] = U->ncols;
result = ptiNewSemiSparseTensor(Y, X->nmodes, mode, ind_buf);
delete[] ind_buf;
pti_CheckError(result, "CUDA SpTns * Mtx", NULL);
ptiSemiSparseTensorSetIndices(Y, &fiberidx, X);
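    // fiberidx records, for each fiber (row) of the semi-sparse output Y, where the nonzeros
    // of X contributing to that fiber start, so the kernel can accumulate over one fiber per
    // thread column.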
ptiValue *Y_val = NULL;
result = hipMalloc((void **) &Y_val, Y->nnz * Y->stride * sizeof (ptiValue));
pti_CheckCudaError(result != 0, "CUDA SpTns * Mtx");
// jli: Add memset to Y.
hipMemset(Y_val, 0, Y->nnz * Y->stride * sizeof (ptiValue));
ptiValue *X_val = NULL;
result = hipMalloc((void **) &X_val, X->nnz * sizeof (ptiValue));
pti_CheckCudaError(result != 0, "CUDA SpTns * Mtx");
hipMemcpy(X_val, X->values.data, X->nnz * sizeof (ptiValue), hipMemcpyHostToDevice);
ptiIndex *X_inds_m = NULL;
    result = hipMalloc((void **) &X_inds_m, X->nnz * sizeof (ptiIndex));
    pti_CheckCudaError(result != 0, "CUDA SpTns * Mtx");
    hipMemcpy(X_inds_m, X->inds[mode].data, X->nnz * sizeof (ptiIndex), hipMemcpyHostToDevice);
ptiValue *U_val = NULL;
result = hipMalloc((void **) &U_val, U->nrows * U->stride * sizeof (ptiValue));
pti_CheckCudaError(result != 0, "CUDA SpTns * Mtx");
hipMemcpy(U_val, U->values, U->nrows * U->stride * sizeof (ptiValue), hipMemcpyHostToDevice);
ptiNnzIndex *fiberidx_val = NULL;
result = hipMalloc((void **) &fiberidx_val, fiberidx.len * sizeof (ptiNnzIndex));
pti_CheckCudaError(result != 0, "CUDA SpTns * Mtx");
hipMemcpy(fiberidx_val, fiberidx.data, fiberidx.len * sizeof (ptiNnzIndex), hipMemcpyHostToDevice);
const char *env_PARTI_TTM_KERNEL = getenv("PARTI_TTM_KERNEL");
const bool use_naive_kernel = env_PARTI_TTM_KERNEL && !strcmp(env_PARTI_TTM_KERNEL, "naive");
const ptiNnzIndex max_nblocks = 32768;
const ptiNnzIndex max_nthreads = 1024;
// size_t sharedMem = (Y->ndims[mode] + X->ndims[mode])*sizeof (ptiScalar) + X->ndims[mode]*sizeof (size_t);
const char *env_PARTI_TTM_NTHREADS = getenv("PARTI_TTM_NTHREADS");
ptiNnzIndex nthreadsX = 32;
if(env_PARTI_TTM_NTHREADS) {
sscanf(env_PARTI_TTM_NTHREADS, "%lu", &nthreadsX);
}
ptiNnzIndex sharedMem = nthreadsX * Y->stride * sizeof (ptiValue);
ptiNnzIndex all_nblocks = Y->nnz % nthreadsX == 0 ? Y->nnz / nthreadsX : Y->nnz / nthreadsX + 1;
assert(U->ncols < max_nthreads);
dim3 dimBlock(nthreadsX, U->ncols);
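    // 2-D thread block: the x dimension walks fibers of Y, the y dimension walks the columns
    // of U (hence the assert above that U->ncols fits inside a single block).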
// size_t nblocks = Y->nnz < max_nblocks ? Y->nnz : max_nblocks;
if(!use_naive_kernel) {
fprintf(stderr, "[CUDA SpTns * Mtx] pti_TTMKernel<<<%zu, (%u, %u), %zu>>>\n", all_nblocks, dimBlock.x, dimBlock.y, sharedMem);
} else {
fprintf(stderr, "[CUDA SpTns * Mtx] pti_TTMNaiveKernel<<<%zu, (%u, %u), 0>>>\n", all_nblocks, dimBlock.x, dimBlock.y);
}
ptiTimer timer;
ptiNewTimer(&timer, 0);
ptiStartTimer(timer);
for(ptiNnzIndex block_offset = 0; block_offset < all_nblocks; block_offset += max_nblocks) {
ptiNnzIndex nblocks = all_nblocks - block_offset;
if(nblocks > max_nblocks) {
nblocks = max_nblocks;
}
if(!use_naive_kernel) {
hipLaunchKernelGGL(( pti_TTMKernel), dim3(nblocks), dim3(dimBlock), sharedMem, 0,
Y_val, Y->stride, Y->nnz,
X_val, X->nnz, X_inds_m,
fiberidx_val, fiberidx.len,
U_val, U->nrows, U->ncols, U->stride,
block_offset
);
} else {
hipLaunchKernelGGL(( pti_TTMNaiveKernel), dim3(nblocks), dim3(dimBlock), 0, 0,
Y_val, Y->stride, Y->nnz,
X_val, X->nnz, X_inds_m,
fiberidx_val, fiberidx.len,
U_val, U->nrows, U->ncols, U->stride,
block_offset
);
}
result = hipDeviceSynchronize();
pti_CheckCudaError(result != 0, "CUDA SpTns * Mtx kernel");
}
ptiStopTimer(timer);
ptiPrintElapsedTime(timer, "CUDA SpTns * Mtx");
ptiFreeTimer(timer);
hipMemcpy(Y->values.values, Y_val, Y->nnz * Y->stride * sizeof (ptiValue), hipMemcpyDeviceToHost);
result = hipFree(fiberidx_val);
pti_CheckCudaError(result != 0, "CUDA SpTns * Mtx");
result = hipFree(U_val);
pti_CheckCudaError(result != 0, "CUDA SpTns * Mtx");
result = hipFree(X_inds_m);
pti_CheckCudaError(result != 0, "CUDA SpTns * Mtx");
result = hipFree(X_val);
pti_CheckCudaError(result != 0, "CUDA SpTns * Mtx");
result = hipFree(Y_val);
pti_CheckCudaError(result != 0, "CUDA SpTns * Mtx");
ptiFreeNnzIndexVector(&fiberidx);
return 0;
}
| 783a24dba56847a4a85b5fc6725fd09c9eea5580.cu | /*
This file is part of ParTI!.
ParTI! is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
ParTI! is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with ParTI!.
If not, see <http://www.gnu.org/licenses/>.
*/
#include <HiParTI.h>
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include "sptensor.h"
#include "mmul_cuda_kernels.h"
int ptiCudaSparseTensorMulMatrix(
ptiSemiSparseTensor *Y,
ptiSparseTensor *X,
const ptiMatrix *U,
ptiIndex const mode
) {
int result;
ptiIndex *ind_buf;
ptiIndex m;
ptiNnzIndexVector fiberidx;
if(mode >= X->nmodes) {
pti_CheckError(PTIERR_SHAPE_MISMATCH, "CUDA SpTns * Mtx", "shape mismatch");
}
if(X->ndims[mode] != U->nrows) {
pti_CheckError(PTIERR_SHAPE_MISMATCH, "CUDA SpTns * Mtx", "shape mismatch");
}
ptiSparseTensorSortIndexAtMode(X, mode, 0);
    ind_buf = new ptiIndex[X->nmodes];
for(m = 0; m < X->nmodes; ++m) {
ind_buf[m] = X->ndims[m];
}
ind_buf[mode] = U->ncols;
result = ptiNewSemiSparseTensor(Y, X->nmodes, mode, ind_buf);
delete[] ind_buf;
pti_CheckError(result, "CUDA SpTns * Mtx", NULL);
ptiSemiSparseTensorSetIndices(Y, &fiberidx, X);
ptiValue *Y_val = NULL;
result = cudaMalloc((void **) &Y_val, Y->nnz * Y->stride * sizeof (ptiValue));
pti_CheckCudaError(result != 0, "CUDA SpTns * Mtx");
// jli: Add memset to Y.
cudaMemset(Y_val, 0, Y->nnz * Y->stride * sizeof (ptiValue));
ptiValue *X_val = NULL;
result = cudaMalloc((void **) &X_val, X->nnz * sizeof (ptiValue));
pti_CheckCudaError(result != 0, "CUDA SpTns * Mtx");
cudaMemcpy(X_val, X->values.data, X->nnz * sizeof (ptiValue), cudaMemcpyHostToDevice);
ptiIndex *X_inds_m = NULL;
    result = cudaMalloc((void **) &X_inds_m, X->nnz * sizeof (ptiIndex));
    pti_CheckCudaError(result != 0, "CUDA SpTns * Mtx");
    cudaMemcpy(X_inds_m, X->inds[mode].data, X->nnz * sizeof (ptiIndex), cudaMemcpyHostToDevice);
ptiValue *U_val = NULL;
result = cudaMalloc((void **) &U_val, U->nrows * U->stride * sizeof (ptiValue));
pti_CheckCudaError(result != 0, "CUDA SpTns * Mtx");
cudaMemcpy(U_val, U->values, U->nrows * U->stride * sizeof (ptiValue), cudaMemcpyHostToDevice);
ptiNnzIndex *fiberidx_val = NULL;
result = cudaMalloc((void **) &fiberidx_val, fiberidx.len * sizeof (ptiNnzIndex));
pti_CheckCudaError(result != 0, "CUDA SpTns * Mtx");
cudaMemcpy(fiberidx_val, fiberidx.data, fiberidx.len * sizeof (ptiNnzIndex), cudaMemcpyHostToDevice);
const char *env_PARTI_TTM_KERNEL = getenv("PARTI_TTM_KERNEL");
const bool use_naive_kernel = env_PARTI_TTM_KERNEL && !strcmp(env_PARTI_TTM_KERNEL, "naive");
const ptiNnzIndex max_nblocks = 32768;
const ptiNnzIndex max_nthreads = 1024;
// size_t sharedMem = (Y->ndims[mode] + X->ndims[mode])*sizeof (ptiScalar) + X->ndims[mode]*sizeof (size_t);
const char *env_PARTI_TTM_NTHREADS = getenv("PARTI_TTM_NTHREADS");
ptiNnzIndex nthreadsX = 32;
if(env_PARTI_TTM_NTHREADS) {
sscanf(env_PARTI_TTM_NTHREADS, "%lu", &nthreadsX);
}
ptiNnzIndex sharedMem = nthreadsX * Y->stride * sizeof (ptiValue);
ptiNnzIndex all_nblocks = Y->nnz % nthreadsX == 0 ? Y->nnz / nthreadsX : Y->nnz / nthreadsX + 1;
assert(U->ncols < max_nthreads);
dim3 dimBlock(nthreadsX, U->ncols);
// size_t nblocks = Y->nnz < max_nblocks ? Y->nnz : max_nblocks;
if(!use_naive_kernel) {
fprintf(stderr, "[CUDA SpTns * Mtx] pti_TTMKernel<<<%zu, (%u, %u), %zu>>>\n", all_nblocks, dimBlock.x, dimBlock.y, sharedMem);
} else {
fprintf(stderr, "[CUDA SpTns * Mtx] pti_TTMNaiveKernel<<<%zu, (%u, %u), 0>>>\n", all_nblocks, dimBlock.x, dimBlock.y);
}
ptiTimer timer;
ptiNewTimer(&timer, 0);
ptiStartTimer(timer);
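    // Launch in slices of at most max_nblocks thread blocks; block_offset tells the kernel
    // which slice of Y's fibers the current launch covers.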
for(ptiNnzIndex block_offset = 0; block_offset < all_nblocks; block_offset += max_nblocks) {
ptiNnzIndex nblocks = all_nblocks - block_offset;
if(nblocks > max_nblocks) {
nblocks = max_nblocks;
}
if(!use_naive_kernel) {
pti_TTMKernel<<<nblocks, dimBlock, sharedMem>>>(
Y_val, Y->stride, Y->nnz,
X_val, X->nnz, X_inds_m,
fiberidx_val, fiberidx.len,
U_val, U->nrows, U->ncols, U->stride,
block_offset
);
} else {
pti_TTMNaiveKernel<<<nblocks, dimBlock>>>(
Y_val, Y->stride, Y->nnz,
X_val, X->nnz, X_inds_m,
fiberidx_val, fiberidx.len,
U_val, U->nrows, U->ncols, U->stride,
block_offset
);
}
result = cudaThreadSynchronize();
pti_CheckCudaError(result != 0, "CUDA SpTns * Mtx kernel");
}
ptiStopTimer(timer);
ptiPrintElapsedTime(timer, "CUDA SpTns * Mtx");
ptiFreeTimer(timer);
cudaMemcpy(Y->values.values, Y_val, Y->nnz * Y->stride * sizeof (ptiValue), cudaMemcpyDeviceToHost);
result = cudaFree(fiberidx_val);
pti_CheckCudaError(result != 0, "CUDA SpTns * Mtx");
result = cudaFree(U_val);
pti_CheckCudaError(result != 0, "CUDA SpTns * Mtx");
result = cudaFree(X_inds_m);
pti_CheckCudaError(result != 0, "CUDA SpTns * Mtx");
result = cudaFree(X_val);
pti_CheckCudaError(result != 0, "CUDA SpTns * Mtx");
result = cudaFree(Y_val);
pti_CheckCudaError(result != 0, "CUDA SpTns * Mtx");
ptiFreeNnzIndexVector(&fiberidx);
return 0;
}
|
a895ae62b79b72522cc4aca68695b9dc7bfae9b6.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
//#include <hip/hip_runtime.h>
#include "cuda_wrapper.h"
__global__ void print_from_gpu(void)
{
printf("Device:Hello World! from thread [%d, %d]\
From device\n", threadIdx.x, blockIdx.x);
}
namespace Wrapper
{
void wrapper(void)
{
printf("Host: Hello, world!\n");
hipLaunchKernelGGL(( print_from_gpu), dim3(2),dim3(1), 0, 0, );
hipDeviceSynchronize();
}
}
| a895ae62b79b72522cc4aca68695b9dc7bfae9b6.cu | #include <stdio.h>
#include <stdlib.h>
//#include <cuda.h>
#include "cuda_wrapper.h"
__global__ void print_from_gpu(void)
{
printf("Device:Hello World! from thread [%d, %d]\
From device\n", threadIdx.x, blockIdx.x);
}
namespace Wrapper
{
void wrapper(void)
{
printf("Host: Hello, world!\n");
print_from_gpu<<<2,1>>>();
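    // Launch 2 blocks of 1 thread each; the cudaDeviceSynchronize() below waits for the
    // kernel and flushes its device-side printf output before returning to the caller.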
cudaDeviceSynchronize();
}
}
|
da0a16b122af6e84f7fe2a13327d1657f2b1c97b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <thrust/random.h>
#include <thrust/sort.h>
#include <iostream>
#include <vector>
#include "paddle/fluid/framework/ddim.h"
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/operators/math/sample_prob.h"
#include "paddle/fluid/operators/math/sampler.h"
namespace paddle {
namespace operators {
namespace math {
using Tensor = framework::Tensor;
template <typename T>
__device__ T gpu_adjust_prob(const T prob, const int num_samples,
const int num_tries) {
if (num_samples == num_tries) {
return prob * num_samples;
} else {
return -expm1(num_tries * log1p(-prob));
}
}
class GPULogUniformSampler {
public:
__device__ int64_t Sample(float random, const int range,
const float log_range) const;
__device__ float Probability(int64_t value, const float log_range) const;
};
__device__ int64_t GPULogUniformSampler::Sample(float random, const int range,
const float log_range) const {
  // Obtain a log-uniform sample from a uniform random number via inverse transform sampling.
const int64_t value = static_cast<int64_t>(exp(random * log_range)) - 1;
// Mathematically, value should be <= range_, but might not be due to some
// floating point roundoff, so we mod by range_.
return value % range;
}
__device__ float GPULogUniformSampler::Probability(
int64_t value, const float log_range) const {
// Given f(x) = 1/[(x+1) * log_range_]
// The value's probability is integral of f(x) from value to (value + 1)
return (log((value + 2.0) / (value + 1.0))) / log_range;
}
template <typename T>
__global__ void SamplingCondidate(
const size_t n, const int num_tries, const int range, const float log_range,
const int num_true, const std::size_t num_samples,
const int64_t* label_data, int64_t* samples_data, T* probabilities_data) {
const int num_sampled_classes = num_true + num_samples;
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int step_size = 0;
GPULogUniformSampler sampler;
for (; idx < n; idx += blockDim.x * gridDim.x) {
int col_idx = idx % num_sampled_classes;
int row_idx = idx / num_sampled_classes;
if (col_idx < num_true) {
samples_data[idx] = label_data[row_idx * num_true + col_idx];
} else {
samples_data[idx] = samples_data[col_idx];
}
probabilities_data[idx] = sampler.Probability(samples_data[idx], log_range);
probabilities_data[idx] =
gpu_adjust_prob(probabilities_data[idx], num_samples, num_tries);
}
}
template <typename T>
int UniqSampler(const Sampler& sampler, const std::size_t num_samples,
int64_t* samples_data) {
  // sample num_samples unique samples for an example; note that they are not
  // all negative samples
std::unordered_set<int64_t> tmp_samples;
tmp_samples.clear();
int num_tries = 0;
int j = 0;
while (j < num_samples) {
++num_tries;
auto v = sampler.Sample();
auto insert_ok = tmp_samples.insert(v).second;
if (!insert_ok) {
continue;
}
samples_data[j] = v;
++j;
}
return num_tries;
}
template <typename T>
void GPUSampleWithProb<T>::operator()(
const platform::CUDADeviceContext& context, const int seed,
const int dict_size, const bool uniq, const std::size_t num_samples,
const Tensor* L, Tensor* S, Tensor* P) {
// UNDERSTAND: dimension issues
const auto lbl_dim = L->dims();
const int batch_size = lbl_dim[0];
const int num_true = lbl_dim[1];
const int num_sampled_classes = num_true + num_samples;
framework::DDim ret_dim{batch_size, num_sampled_classes};
// UNDERSTAND: raw data view
const int64_t* label_data = L->data<int64_t>();
int64_t* samples_data = S->data<int64_t>();
T* probabilities_data = P->data<T>();
int s_size = num_samples;
framework::DDim s_dim{s_size};
Tensor s;
int64_t* s_data = s.mutable_data<int64_t>(s_dim, platform::CPUPlace());
math::LogUniformSampler sampler(dict_size, seed);
int range = dict_size;
float log_range = log(range + 1);
int num_tries = UniqSampler<T>(sampler, num_samples, s_data);
VLOG(1) << "num_tries: " << num_tries;
PADDLE_ENFORCE(hipMemcpy(samples_data + num_true, s_data,
sizeof(int64_t) * num_samples,
hipMemcpyHostToDevice));
int threads = 512;
const size_t size = batch_size * num_sampled_classes;
int grid = (batch_size * num_sampled_classes + threads - 1) / threads;
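  // One thread per output entry: the kernel copies the true labels into the first num_true
  // slots of each row, broadcasts the shared negative samples into the remaining slots, and
  // turns every sample into an expected count via gpu_adjust_prob.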
hipLaunchKernelGGL(( SamplingCondidate<T>), dim3(grid), dim3(threads), 0, context.stream(),
size, num_tries, range, log_range, num_true, num_samples, label_data,
samples_data, probabilities_data);
}
template class GPUSampleWithProb<float>;
template class GPUSampleWithProb<double>;
} // namespace math
} // namespace operators
} // namespace paddle
| da0a16b122af6e84f7fe2a13327d1657f2b1c97b.cu | /* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <thrust/random.h>
#include <thrust/sort.h>
#include <iostream>
#include <vector>
#include "paddle/fluid/framework/ddim.h"
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/operators/math/sample_prob.h"
#include "paddle/fluid/operators/math/sampler.h"
namespace paddle {
namespace operators {
namespace math {
using Tensor = framework::Tensor;
template <typename T>
__device__ T gpu_adjust_prob(const T prob, const int num_samples,
const int num_tries) {
if (num_samples == num_tries) {
return prob * num_samples;
} else {
return -expm1(num_tries * log1p(-prob));
}
}
class GPULogUniformSampler {
public:
__device__ int64_t Sample(float random, const int range,
const float log_range) const;
__device__ float Probability(int64_t value, const float log_range) const;
};
__device__ int64_t GPULogUniformSampler::Sample(float random, const int range,
const float log_range) const {
  // Obtain a log-uniform sample from a uniform random number via inverse transform sampling.
const int64_t value = static_cast<int64_t>(exp(random * log_range)) - 1;
// Mathematically, value should be <= range_, but might not be due to some
// floating point roundoff, so we mod by range_.
return value % range;
}
__device__ float GPULogUniformSampler::Probability(
int64_t value, const float log_range) const {
// Given f(x) = 1/[(x+1) * log_range_]
// The value's probability is integral of f(x) from value to (value + 1)
return (log((value + 2.0) / (value + 1.0))) / log_range;
}
template <typename T>
__global__ void SamplingCondidate(
const size_t n, const int num_tries, const int range, const float log_range,
const int num_true, const std::size_t num_samples,
const int64_t* label_data, int64_t* samples_data, T* probabilities_data) {
const int num_sampled_classes = num_true + num_samples;
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int step_size = 0;
GPULogUniformSampler sampler;
for (; idx < n; idx += blockDim.x * gridDim.x) {
int col_idx = idx % num_sampled_classes;
int row_idx = idx / num_sampled_classes;
if (col_idx < num_true) {
samples_data[idx] = label_data[row_idx * num_true + col_idx];
} else {
samples_data[idx] = samples_data[col_idx];
}
probabilities_data[idx] = sampler.Probability(samples_data[idx], log_range);
probabilities_data[idx] =
gpu_adjust_prob(probabilities_data[idx], num_samples, num_tries);
}
}
template <typename T>
int UniqSampler(const Sampler& sampler, const std::size_t num_samples,
int64_t* samples_data) {
  // sample num_samples unique samples for an example; note that they are not
  // all negative samples
std::unordered_set<int64_t> tmp_samples;
tmp_samples.clear();
int num_tries = 0;
int j = 0;
while (j < num_samples) {
++num_tries;
auto v = sampler.Sample();
auto insert_ok = tmp_samples.insert(v).second;
if (!insert_ok) {
continue;
}
samples_data[j] = v;
++j;
}
return num_tries;
}
template <typename T>
void GPUSampleWithProb<T>::operator()(
const platform::CUDADeviceContext& context, const int seed,
const int dict_size, const bool uniq, const std::size_t num_samples,
const Tensor* L, Tensor* S, Tensor* P) {
// UNDERSTAND: dimension issues
const auto lbl_dim = L->dims();
const int batch_size = lbl_dim[0];
const int num_true = lbl_dim[1];
const int num_sampled_classes = num_true + num_samples;
framework::DDim ret_dim{batch_size, num_sampled_classes};
// UNDERSTAND: raw data view
const int64_t* label_data = L->data<int64_t>();
int64_t* samples_data = S->data<int64_t>();
T* probabilities_data = P->data<T>();
int s_size = num_samples;
framework::DDim s_dim{s_size};
Tensor s;
int64_t* s_data = s.mutable_data<int64_t>(s_dim, platform::CPUPlace());
math::LogUniformSampler sampler(dict_size, seed);
int range = dict_size;
float log_range = log(range + 1);
int num_tries = UniqSampler<T>(sampler, num_samples, s_data);
VLOG(1) << "num_tries: " << num_tries;
PADDLE_ENFORCE(cudaMemcpy(samples_data + num_true, s_data,
sizeof(int64_t) * num_samples,
cudaMemcpyHostToDevice));
int threads = 512;
const size_t size = batch_size * num_sampled_classes;
int grid = (batch_size * num_sampled_classes + threads - 1) / threads;
SamplingCondidate<T><<<grid, threads, 0, context.stream()>>>(
size, num_tries, range, log_range, num_true, num_samples, label_data,
samples_data, probabilities_data);
}
template class GPUSampleWithProb<float>;
template class GPUSampleWithProb<double>;
} // namespace math
} // namespace operators
} // namespace paddle
|
7821e5a10b28d091ee5b076f369c4269c72c0e51.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@generated from magmablas/zgeqr2x_gpu-v4.cu, normal z -> d, Mon Jun 25 18:24:14 2018
*/
#include "magma_internal.h"
#include "commonblas_d.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
/***************************************************************************//**
Purpose
-------
DGEQR2 computes a QR factorization of a real m by n matrix A:
A = Q * R.
This expert routine requires two more arguments than the standard
dgeqr2, namely, dT and ddA, explained below. The storage for A is
also not as in the LAPACK's dgeqr2 routine (see below).
The first is used to output the triangular
n x n factor T of the block reflector used in the factorization.
The second holds the diagonal nxn blocks of A, i.e., the diagonal
submatrices of R. This routine implements the left looking QR.
This version adds internal blocking.
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix A. N >= 0.
@param[in,out]
dA DOUBLE PRECISION array, dimension (LDA,N)
On entry, the m by n matrix A.
On exit, the orthogonal matrix Q as a
product of elementary reflectors (see Further Details).
\n
the elements on and above the diagonal of the array
contain the min(m,n) by n upper trapezoidal matrix R (R is
upper triangular if m >= n); the elements below the diagonal,
with the array TAU, represent the orthogonal matrix Q as a
product of elementary reflectors (see Further Details).
@param[in]
ldda INTEGER
The leading dimension of the array A. LDA >= max(1,M).
@param[out]
dtau DOUBLE PRECISION array, dimension (min(M,N))
The scalar factors of the elementary reflectors (see Further
Details).
@param[out]
dT DOUBLE PRECISION array, dimension N x N.
Stores the triangular N x N factor T of the block reflector
used in the factorization. The lower triangular part is 0.
@param[out]
ddA DOUBLE PRECISION array, dimension N x N.
Stores the elements of the upper N x N diagonal block of A.
LAPACK stores this array in A. There are 0s below the diagonal.
@param
dwork (workspace) DOUBLE PRECISION array, dimension (3 N)
@param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value
@param[in]
queue magma_queue_t
Queue to execute in.
Further Details
---------------
The matrix Q is represented as a product of elementary reflectors
Q = H(1) H(2) . . . H(k), where k = min(m,n).
Each H(i) has the form
H(i) = I - tau * v * v**H
where tau is a real scalar, and v is a real vector with
v(1:i-1) = 0 and v(i) = 1; v(i+1:m) is stored on exit in A(i+1:m,i),
and tau in TAU(i).
@ingroup magma_geqr2
*******************************************************************************/
extern "C" magma_int_t
magma_dgeqr2x4_gpu(
magma_int_t m, magma_int_t n,
magmaDouble_ptr dA, magma_int_t ldda,
magmaDouble_ptr dtau,
magmaDouble_ptr dT,
magmaDouble_ptr ddA,
magmaDouble_ptr dwork,
magma_queue_t queue,
magma_int_t *info)
{
#define dA(i_,j_) (dA + (j_)*(ldda) + (i_))
#define dT(i_,j_) (dT + (j_)*(k) + (i_))
#define BS 32
magma_int_t i, k;
magmaDouble_ptr dnorm = (magmaDouble_ptr)dwork;
magmaDouble_ptr dwork2 = (magmaDouble_ptr)(dwork + 2*n);
*info = 0;
if (m < 0) {
*info = -1;
} else if (n < 0) {
*info = -2;
} else if (ldda < max(1,m)) {
*info = -4;
}
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return *info;
}
/* Compute the norms of the trailing columns */
k = min(m,n);
magmablas_dnrm2_cols( m, k, dA(0,0), ldda, dnorm, queue );
for (magma_int_t b=0; b < k; b += BS) {
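        // Each pass of this loop factors one panel of BS columns: every new column i is first
        // updated (left-looking) with the reflectors already computed inside the panel, its
        // Householder vector and the matching column of T are formed, and the trailing matrix
        // is updated once per panel with magma_dlarfb2_gpu.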
for (i = b; i < min(k, b+BS); ++i) {
/* Apply H**H to A(:,i) from the left */
if (i-b > 0) {
/* Compute the (i-1)th column of T */
if ( i-1 > 0 ) {
hipLaunchKernelGGL(( magma_dgemv_kernel3)
, dim3(i-1), dim3(BLOCK_SIZE), 0, queue->cuda_stream() ,
m-i+1, dA(i-1,0), ldda, dA(i-1, i-1), dwork2, dtau+i-1);
hipLaunchKernelGGL(( magma_dtrmv_kernel2)
, dim3(i-1), dim3(i-1), 0, queue->cuda_stream() ,
dT(0,0), k, dwork2, dT(0,i-1), dtau+i-1);
}
/* dwork = V**H c */
hipLaunchKernelGGL(( magma_dgemv_kernel1)
, dim3(i-b), dim3(BLOCK_SIZE), 0, queue->cuda_stream() ,
m-b, dA(b, b), ldda, dA(b,i), dwork2);
/* dwork = T**H dwork2 */
hipLaunchKernelGGL(( magma_dtrmv_tkernel)
, dim3(i-b), dim3(i-b), 0, queue->cuda_stream() ,
dT(b,b), k, dwork2, dwork2+i-b);
/* c = c - V dwork2 */
if ( m-b > 0 ) {
dim3 blocks3( magma_ceildiv( m-b, BLOCK_SIZE ) );
dim3 threads3( BLOCK_SIZE );
hipLaunchKernelGGL(( magma_dgemv_kernel2)
, dim3(blocks3), dim3(threads3), 0, queue->cuda_stream() ,
m-b, i-b, dA(b,b), ldda, dwork2+i-b, dA(b, i));
}
}
/* Adjust the dnorm[i] to hold the norm of A(i:m,i) */
if ( i > 0 ) {
hipLaunchKernelGGL(( magma_dnrm2_adjust_kernel)
, dim3(1), dim3(i), 0, queue->cuda_stream() ,
dnorm+i, dA(0, i));
}
/* Generate elementary reflector H(i) to annihilate A(i+1:m,i)
1. 1 is not yet put on the diagonal of A
2. Elements above the diagonal are copied in ddA and
the ones in A are set to zero
3. update T */
magma_dlarfgx_gpu( m-i, dA(i, i), dA(min(i+1,m),i), dtau+i,
dnorm+i, ddA + i + i*n, i, queue );
if (i == 0) {
double tt = MAGMA_D_ONE;
magmablas_dlacpy( MagmaFull, 1, 1, dtau, 1, dT(0,0), 1, queue );
magma_dsetmatrix_async(1, 1, &tt, 1, dA(i, i), 1, queue );
}
}
if ( i-1 > 0 ) {
hipLaunchKernelGGL(( magma_dgemv_kernel3)
, dim3(i-1), dim3(BLOCK_SIZE), 0, queue->cuda_stream() ,
m-i+1, dA(i-1,0), ldda, dA(i-1, i-1), dwork2, dtau+i-1);
hipLaunchKernelGGL(( magma_dtrmv_kernel2)
, dim3(i-1), dim3(i-1), 0, queue->cuda_stream() ,
dT(0,0), k, dwork2, dT(0,i-1), dtau+i-1);
}
/* Apply the transformations to the trailing matrix. */
//magma_dlarfb2_gpu( MagmaLeft, MagmaConjTrans, MagmaForward, MagmaColumnwise,
magma_dlarfb2_gpu(
m-b, k-i, BS,
dA(b, b), ldda, dT+b+b*k, k,
dA(b, i), ldda, dwork2, k-i, queue );
}
return *info;
} /* magma_dgeqr2 */
| 7821e5a10b28d091ee5b076f369c4269c72c0e51.cu | /*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@generated from magmablas/zgeqr2x_gpu-v4.cu, normal z -> d, Mon Jun 25 18:24:14 2018
*/
#include "magma_internal.h"
#include "commonblas_d.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
/***************************************************************************//**
Purpose
-------
DGEQR2 computes a QR factorization of a real m by n matrix A:
A = Q * R.
This expert routine requires two more arguments than the standard
dgeqr2, namely, dT and ddA, explained below. The storage for A is
also not as in the LAPACK's dgeqr2 routine (see below).
The first is used to output the triangular
n x n factor T of the block reflector used in the factorization.
The second holds the diagonal nxn blocks of A, i.e., the diagonal
submatrices of R. This routine implements the left looking QR.
This version adds internal blocking.
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix A. N >= 0.
@param[in,out]
dA DOUBLE PRECISION array, dimension (LDA,N)
On entry, the m by n matrix A.
On exit, the orthogonal matrix Q as a
product of elementary reflectors (see Further Details).
\n
the elements on and above the diagonal of the array
contain the min(m,n) by n upper trapezoidal matrix R (R is
upper triangular if m >= n); the elements below the diagonal,
with the array TAU, represent the orthogonal matrix Q as a
product of elementary reflectors (see Further Details).
@param[in]
ldda INTEGER
The leading dimension of the array A. LDA >= max(1,M).
@param[out]
dtau DOUBLE PRECISION array, dimension (min(M,N))
The scalar factors of the elementary reflectors (see Further
Details).
@param[out]
dT DOUBLE PRECISION array, dimension N x N.
Stores the triangular N x N factor T of the block reflector
used in the factorization. The lower triangular part is 0.
@param[out]
ddA DOUBLE PRECISION array, dimension N x N.
Stores the elements of the upper N x N diagonal block of A.
LAPACK stores this array in A. There are 0s below the diagonal.
@param
dwork (workspace) DOUBLE PRECISION array, dimension (3 N)
@param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value
@param[in]
queue magma_queue_t
Queue to execute in.
Further Details
---------------
The matrix Q is represented as a product of elementary reflectors
Q = H(1) H(2) . . . H(k), where k = min(m,n).
Each H(i) has the form
H(i) = I - tau * v * v**H
where tau is a real scalar, and v is a real vector with
v(1:i-1) = 0 and v(i) = 1; v(i+1:m) is stored on exit in A(i+1:m,i),
and tau in TAU(i).
@ingroup magma_geqr2
*******************************************************************************/
extern "C" magma_int_t
magma_dgeqr2x4_gpu(
magma_int_t m, magma_int_t n,
magmaDouble_ptr dA, magma_int_t ldda,
magmaDouble_ptr dtau,
magmaDouble_ptr dT,
magmaDouble_ptr ddA,
magmaDouble_ptr dwork,
magma_queue_t queue,
magma_int_t *info)
{
#define dA(i_,j_) (dA + (j_)*(ldda) + (i_))
#define dT(i_,j_) (dT + (j_)*(k) + (i_))
#define BS 32
magma_int_t i, k;
magmaDouble_ptr dnorm = (magmaDouble_ptr)dwork;
magmaDouble_ptr dwork2 = (magmaDouble_ptr)(dwork + 2*n);
*info = 0;
if (m < 0) {
*info = -1;
} else if (n < 0) {
*info = -2;
} else if (ldda < max(1,m)) {
*info = -4;
}
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return *info;
}
/* Compute the norms of the trailing columns */
k = min(m,n);
magmablas_dnrm2_cols( m, k, dA(0,0), ldda, dnorm, queue );
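    // The column norms of A are computed once up front; magma_dnrm2_adjust_kernel later
    // downdates dnorm[i] so it holds the norm of A(i:m,i) as the factorization proceeds.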
for (magma_int_t b=0; b < k; b += BS) {
for (i = b; i < min(k, b+BS); ++i) {
/* Apply H**H to A(:,i) from the left */
if (i-b > 0) {
/* Compute the (i-1)th column of T */
if ( i-1 > 0 ) {
magma_dgemv_kernel3
<<< i-1, BLOCK_SIZE, 0, queue->cuda_stream() >>>
( m-i+1, dA(i-1,0), ldda, dA(i-1, i-1), dwork2, dtau+i-1);
magma_dtrmv_kernel2
<<< i-1, i-1, 0, queue->cuda_stream() >>>
( dT(0,0), k, dwork2, dT(0,i-1), dtau+i-1);
}
/* dwork = V**H c */
magma_dgemv_kernel1
<<< i-b, BLOCK_SIZE, 0, queue->cuda_stream() >>>
(m-b, dA(b, b), ldda, dA(b,i), dwork2);
/* dwork = T**H dwork2 */
magma_dtrmv_tkernel
<<< i-b, i-b, 0, queue->cuda_stream() >>>
(dT(b,b), k, dwork2, dwork2+i-b);
/* c = c - V dwork2 */
if ( m-b > 0 ) {
dim3 blocks3( magma_ceildiv( m-b, BLOCK_SIZE ) );
dim3 threads3( BLOCK_SIZE );
magma_dgemv_kernel2
<<< blocks3, threads3, 0, queue->cuda_stream() >>>
(m-b, i-b, dA(b,b), ldda, dwork2+i-b, dA(b, i));
}
}
/* Adjust the dnorm[i] to hold the norm of A(i:m,i) */
if ( i > 0 ) {
magma_dnrm2_adjust_kernel
<<< 1, i, 0, queue->cuda_stream() >>>
(dnorm+i, dA(0, i));
}
/* Generate elementary reflector H(i) to annihilate A(i+1:m,i)
1. 1 is not yet put on the diagonal of A
2. Elements above the diagonal are copied in ddA and
the ones in A are set to zero
3. update T */
magma_dlarfgx_gpu( m-i, dA(i, i), dA(min(i+1,m),i), dtau+i,
dnorm+i, ddA + i + i*n, i, queue );
if (i == 0) {
double tt = MAGMA_D_ONE;
magmablas_dlacpy( MagmaFull, 1, 1, dtau, 1, dT(0,0), 1, queue );
magma_dsetmatrix_async(1, 1, &tt, 1, dA(i, i), 1, queue );
}
}
if ( i-1 > 0 ) {
magma_dgemv_kernel3
<<< i-1, BLOCK_SIZE, 0, queue->cuda_stream() >>>
( m-i+1, dA(i-1,0), ldda, dA(i-1, i-1), dwork2, dtau+i-1);
magma_dtrmv_kernel2
<<< i-1, i-1, 0, queue->cuda_stream() >>>
( dT(0,0), k, dwork2, dT(0,i-1), dtau+i-1);
}
/* Apply the transformations to the trailing matrix. */
//magma_dlarfb2_gpu( MagmaLeft, MagmaConjTrans, MagmaForward, MagmaColumnwise,
magma_dlarfb2_gpu(
m-b, k-i, BS,
dA(b, b), ldda, dT+b+b*k, k,
dA(b, i), ldda, dwork2, k-i, queue );
}
return *info;
} /* magma_dgeqr2 */
|
6c136426cf00b0935ef7541cda2dd1dbaa233eb5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <opencv2/opencv.hpp>
#include <vector>
__global__ void sobelShared ( unsigned char * data, unsigned char * out, std::size_t w, std::size_t h) {
auto op1 = blockIdx.x * (blockDim.x-2) + threadIdx.x;
auto op2 = blockIdx.y * (blockDim.y-2) + threadIdx.y;
auto op3 = threadIdx.x;
auto op4 = threadIdx.y;
extern __shared__ unsigned char sharedExt[];
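  // Dynamic shared memory holds this block's RGB tile.  Blocks advance by (blockDim - 2)
  // pixels, so neighbouring tiles overlap by one pixel on each side and every interior
  // thread sees its full 3x3 neighbourhood; only interior threads write an output pixel.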
if( op1 < w && op2 < h ) {
sharedExt[3 * (op4 * blockDim.x + op3) ] = data[ 3 * ( op2 * w + op1 ) ];
sharedExt[3 * (op4 * blockDim.x + op3) + 1 ] = data[ 3 * ( op2 * w + op1 ) + 1];
sharedExt[3 * (op4 * blockDim.x + op3) + 2 ] = data[ 3 * ( op2 * w + op1 ) + 2 ];
__syncthreads();
auto ww = blockDim.x;
if( op3 > 0 && op3 < (blockDim.x - 1) && op4 > 0 && op4 < (blockDim.y - 1) )
{
for (auto c = 0; c < 3; ++c){
auto op5 = sharedExt[ ((op4-1)*ww + op3 - 1)* 3 + c ] - sharedExt[ ((op4-1)*ww + op3 + 1) * 3 + c ]
+ 2 * sharedExt[ (op4*ww + op3 - 1) * 3 + c ] - 2* sharedExt[ (op4*ww+op3+1) * 3 + c]
+ sharedExt[ ((op4+1)*ww + op3 -1) * 3 + c] - sharedExt[ ((op4+1)*ww +op3 + 1) * 3 + c];
auto op6 = sharedExt[ ((op4-1)*ww + op3 - 1) * 3 + c ] - sharedExt[ ((op4+1)*ww + op3 - 1) * 3 + c ]
+ 2 * sharedExt[ ((op4-1)*ww + op3) * 3 + c ] - 2* sharedExt[ ((op4+1)*ww+op3) * 3 + c ]
+ sharedExt[ ((op4-1)*ww + op3 +1) * 3 + c] - sharedExt[ ((op4+1)*ww +op3 + 1) * 3 + c];
auto res = op5 * op5 + op6 * op6;
res = res > 255*255 ? res = 255*255 : res;
out[ (op2 * w + op1) * 3 + c ] = sqrt( (float)res );
}
}
}
}
int main()
{
cv::Mat img_in = cv::imread("in.jpg", cv::IMREAD_UNCHANGED );
auto rows = img_in.rows;
auto cols = img_in.cols;
auto rgb = img_in.data;
std::vector< unsigned char > g( 3 * rows * cols );
cv::Mat img_out( rows, cols, CV_8UC3, g.data() );
unsigned char * rgb_d;
unsigned char * out;
std::size_t size = 3 * img_in.cols * img_in.rows;
hipMalloc( &rgb_d, 3 * rows * cols);
hipMalloc( &out, 3 * rows * cols );
hipStream_t streams[ 4 ];
hipStreamCreate( &streams[ 0 ] );
hipStreamCreate( &streams[ 1 ] );
hipStreamCreate( &streams[ 2 ] );
hipStreamCreate( &streams[ 3 ] );
hipMemcpyAsync( rgb_d, rgb, size/4, hipMemcpyHostToDevice, streams[ 0 ] );
hipMemcpyAsync( rgb_d+size/4, rgb+size/4, size/4, hipMemcpyHostToDevice, streams[ 1 ] );
  hipMemcpyAsync( rgb_d+size/2, rgb+size/2, size/4, hipMemcpyHostToDevice, streams[ 2 ] );
  hipMemcpyAsync( rgb_d+3*size/4, rgb+3*size/4, size/4, hipMemcpyHostToDevice, streams[ 3 ] );
dim3 dim1( 32, 32 );
dim3 dim2( 3 * (( cols ) / ((dim1.x - 2) + 1) ), (( rows ) / ((dim1.y - 2) + 1) ));
hipEvent_t start, stop;
hipEventCreate( &start );
hipEventCreate( &stop );
hipEventRecord( start );
hipLaunchKernelGGL(( sobelShared), dim3(dim2), dim3(dim1), 3 * dim1.x * dim1.y, streams[ 0 ] , rgb_d, out, cols, rows/4 + 2);
hipLaunchKernelGGL(( sobelShared), dim3(dim2), dim3(dim1), 3 * dim1.x * dim1.y, streams[ 1 ] , rgb_d+size/4, out+size/4, cols, rows/4 + 4);
  hipLaunchKernelGGL(( sobelShared), dim3(dim2), dim3(dim1), 3 * dim1.x * dim1.y, streams[ 2 ] , rgb_d+size/2, out+size/2, cols, rows/4 + 2);
hipLaunchKernelGGL(( sobelShared), dim3(dim2), dim3(dim1), 3 * dim1.x * dim1.y, streams[ 3 ] , rgb_d+3*size/4, out+3*size/4, cols, rows/4 );
hipMemcpyAsync( g.data(), out, size/4, hipMemcpyDeviceToHost, streams[ 0 ] );
hipMemcpyAsync( g.data()+size/4, out+size/4, size/4, hipMemcpyDeviceToHost, streams[ 1 ] );
hipMemcpyAsync( g.data()+size/2, out+size/2, size/4, hipMemcpyDeviceToHost, streams[ 2 ] );
hipMemcpyAsync( g.data()+3*size/4, out+3*size/4, size/4, hipMemcpyDeviceToHost, streams[ 3 ] );
hipDeviceSynchronize();
hipDeviceSynchronize();
  auto err = hipGetLastError();
  if (err != hipSuccess){
    std::cout << hipGetErrorName(err) << std::endl;
    std::cout << hipGetErrorString(err) << std::endl;
}
else {
std::cout << "No Errors!" << std::endl;
}
hipEventRecord( stop );
hipEventSynchronize( stop );
float duration = 0.0f;
hipEventElapsedTime( &duration, start, stop );
std::cout << "Total: " << duration << "ms\n";
cv::imwrite( "SobelSharedStreamOutput.jpg", img_out );
hipFree( rgb_d);
hipFree ( out);
return 0;
}
| 6c136426cf00b0935ef7541cda2dd1dbaa233eb5.cu | #include <opencv2/opencv.hpp>
#include <vector>
__global__ void sobelShared ( unsigned char * data, unsigned char * out, std::size_t w, std::size_t h) {
auto op1 = blockIdx.x * (blockDim.x-2) + threadIdx.x;
auto op2 = blockIdx.y * (blockDim.y-2) + threadIdx.y;
auto op3 = threadIdx.x;
auto op4 = threadIdx.y;
extern __shared__ unsigned char sharedExt[];
if( op1 < w && op2 < h ) {
sharedExt[3 * (op4 * blockDim.x + op3) ] = data[ 3 * ( op2 * w + op1 ) ];
sharedExt[3 * (op4 * blockDim.x + op3) + 1 ] = data[ 3 * ( op2 * w + op1 ) + 1];
sharedExt[3 * (op4 * blockDim.x + op3) + 2 ] = data[ 3 * ( op2 * w + op1 ) + 2 ];
__syncthreads();
auto ww = blockDim.x;
if( op3 > 0 && op3 < (blockDim.x - 1) && op4 > 0 && op4 < (blockDim.y - 1) )
{
for (auto c = 0; c < 3; ++c){
auto op5 = sharedExt[ ((op4-1)*ww + op3 - 1)* 3 + c ] - sharedExt[ ((op4-1)*ww + op3 + 1) * 3 + c ]
+ 2 * sharedExt[ (op4*ww + op3 - 1) * 3 + c ] - 2* sharedExt[ (op4*ww+op3+1) * 3 + c]
+ sharedExt[ ((op4+1)*ww + op3 -1) * 3 + c] - sharedExt[ ((op4+1)*ww +op3 + 1) * 3 + c];
auto op6 = sharedExt[ ((op4-1)*ww + op3 - 1) * 3 + c ] - sharedExt[ ((op4+1)*ww + op3 - 1) * 3 + c ]
+ 2 * sharedExt[ ((op4-1)*ww + op3) * 3 + c ] - 2* sharedExt[ ((op4+1)*ww+op3) * 3 + c ]
+ sharedExt[ ((op4-1)*ww + op3 +1) * 3 + c] - sharedExt[ ((op4+1)*ww +op3 + 1) * 3 + c];
auto res = op5 * op5 + op6 * op6;
res = res > 255*255 ? res = 255*255 : res;
out[ (op2 * w + op1) * 3 + c ] = sqrt( (float)res );
}
}
}
}
int main()
{
cv::Mat img_in = cv::imread("in.jpg", cv::IMREAD_UNCHANGED );
auto rows = img_in.rows;
auto cols = img_in.cols;
auto rgb = img_in.data;
std::vector< unsigned char > g( 3 * rows * cols );
cv::Mat img_out( rows, cols, CV_8UC3, g.data() );
unsigned char * rgb_d;
unsigned char * out;
std::size_t size = 3 * img_in.cols * img_in.rows;
cudaMalloc( &rgb_d, 3 * rows * cols);
cudaMalloc( &out, 3 * rows * cols );
cudaStream_t streams[ 4 ];
cudaStreamCreate( &streams[ 0 ] );
cudaStreamCreate( &streams[ 1 ] );
cudaStreamCreate( &streams[ 2 ] );
cudaStreamCreate( &streams[ 3 ] );
cudaMemcpyAsync( rgb_d, rgb, size/4, cudaMemcpyHostToDevice, streams[ 0 ] );
cudaMemcpyAsync( rgb_d+size/4, rgb+size/4, size/4, cudaMemcpyHostToDevice, streams[ 1 ] );
cudaMemcpyAsync( rgb_d+size/2, rgb+size/2, size/4, cudaMemcpyHostToDevice, streams[ 1 ] );
cudaMemcpyAsync( rgb_d+3*size/4, rgb+3*size/4, size/4, cudaMemcpyHostToDevice, streams[ 1 ] );
dim3 dim1( 32, 32 );
dim3 dim2( 3 * (( cols ) / ((dim1.x - 2) + 1) ), (( rows ) / ((dim1.y - 2) + 1) ));
cudaEvent_t start, stop;
cudaEventCreate( &start );
cudaEventCreate( &stop );
cudaEventRecord( start );
sobelShared<<< dim2, dim1, 3 * dim1.x * dim1.y, streams[ 0 ] >>>( rgb_d, out, cols, rows/4 + 2);
sobelShared<<< dim2, dim1, 3 * dim1.x * dim1.y, streams[ 1 ] >>>( rgb_d+size/4, out+size/4, cols, rows/4 + 4);
sobelShared<<< dim2, dim1, 3 * dim1.x * dim1.y, streams[ 2 ] >>>( rgb_d+size/2, out+size/2, cols, rows/4 + 2);
sobelShared<<< dim2, dim1, 3 * dim1.x * dim1.y, streams[ 3 ] >>>( rgb_d+3*size/4, out+3*size/4, cols, rows/4 );
cudaMemcpyAsync( g.data(), out, size/4, cudaMemcpyDeviceToHost, streams[ 0 ] );
cudaMemcpyAsync( g.data()+size/4, out+size/4, size/4, cudaMemcpyDeviceToHost, streams[ 1 ] );
cudaMemcpyAsync( g.data()+size/2, out+size/2, size/4, cudaMemcpyDeviceToHost, streams[ 2 ] );
cudaMemcpyAsync( g.data()+3*size/4, out+3*size/4, size/4, cudaMemcpyDeviceToHost, streams[ 3 ] );
cudaDeviceSynchronize();
auto err = cudaGetLastError();
if (err != cudaSuccess){
std::cout << cudaGetErrorName(err) << std::endl;
std::cout << cudaGetErrorString(err) << std::endl;
}
else {
std::cout << "No Errors!" << std::endl;
}
cudaEventRecord( stop );
cudaEventSynchronize( stop );
float duration = 0.0f;
cudaEventElapsedTime( &duration, start, stop );
std::cout << "Total: " << duration << "ms\n";
cv::imwrite( "SobelSharedStreamOutput.jpg", img_out );
cudaFree( rgb_d);
cudaFree ( out);
return 0;
}
|
a7901c92fd6dba35e187cb052ccbccd02e56f47a.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "tsvd.h"
#include "tsvd_c.h"
namespace ML {
using namespace MLCommon;
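// Convenience overloads below: each creates the BLAS/solver handles, forwards to
// the handle-taking implementation, and destroys the handles afterwards.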
void tsvdFit(float *input, float *components, float *singular_vals, paramsTSVD prms) {
hipblasHandle_t cublas_handle;
CUBLAS_CHECK(hipblasCreate(&cublas_handle));
hipsolverDnHandle_t cusolver_handle = NULL;
CUSOLVER_CHECK(hipsolverDnCreate(&cusolver_handle));
tsvdFit(input, components, singular_vals, prms, cublas_handle, cusolver_handle);
CUBLAS_CHECK(hipblasDestroy(cublas_handle));
CUSOLVER_CHECK(hipsolverDnDestroy(cusolver_handle));
}
void tsvdFit(double *input, double *components, double *singular_vals, paramsTSVD prms) {
hipblasHandle_t cublas_handle;
CUBLAS_CHECK(hipblasCreate(&cublas_handle));
hipsolverDnHandle_t cusolver_handle = NULL;
CUSOLVER_CHECK(hipsolverDnCreate(&cusolver_handle));
tsvdFit(input, components, singular_vals, prms, cublas_handle, cusolver_handle);
CUBLAS_CHECK(hipblasDestroy(cublas_handle));
CUSOLVER_CHECK(hipsolverDnDestroy(cusolver_handle));
}
void tsvdFitTransform(float *input, float *trans_input, float *components,
float *explained_var, float *explained_var_ratio, float *singular_vals,
paramsTSVD prms) {
hipblasHandle_t cublas_handle;
CUBLAS_CHECK(hipblasCreate(&cublas_handle));
hipsolverDnHandle_t cusolver_handle = NULL;
CUSOLVER_CHECK(hipsolverDnCreate(&cusolver_handle));
tsvdFitTransform(input, trans_input, components, explained_var,
explained_var_ratio, singular_vals, prms, cublas_handle,
cusolver_handle);
CUBLAS_CHECK(hipblasDestroy(cublas_handle));
CUSOLVER_CHECK(hipsolverDnDestroy(cusolver_handle));
}
void tsvdFitTransform(double *input, double *trans_input, double *components,
double *explained_var, double *explained_var_ratio,
double *singular_vals, paramsTSVD prms) {
hipblasHandle_t cublas_handle;
CUBLAS_CHECK(hipblasCreate(&cublas_handle));
hipsolverDnHandle_t cusolver_handle = NULL;
CUSOLVER_CHECK(hipsolverDnCreate(&cusolver_handle));
tsvdFitTransform(input, trans_input, components, explained_var,
explained_var_ratio, singular_vals, prms, cublas_handle,
cusolver_handle);
CUBLAS_CHECK(hipblasDestroy(cublas_handle));
CUSOLVER_CHECK(hipsolverDnDestroy(cusolver_handle));
}
void tsvdTransform(float *input, float *components, float *trans_input,
paramsTSVD prms) {
hipblasHandle_t cublas_handle;
CUBLAS_CHECK(hipblasCreate(&cublas_handle));
tsvdTransform(input, components, trans_input, prms, cublas_handle);
CUBLAS_CHECK(hipblasDestroy(cublas_handle));
}
void tsvdTransform(double *input, double *components, double *trans_input,
paramsTSVD prms) {
hipblasHandle_t cublas_handle;
CUBLAS_CHECK(hipblasCreate(&cublas_handle));
tsvdTransform(input, components, trans_input, prms, cublas_handle);
CUBLAS_CHECK(hipblasDestroy(cublas_handle));
}
void tsvdInverseTransform(float *trans_input, float *components, float *input,
paramsTSVD prms) {
hipblasHandle_t cublas_handle;
CUBLAS_CHECK(hipblasCreate(&cublas_handle));
tsvdInverseTransform(trans_input, components, input, prms, cublas_handle);
CUBLAS_CHECK(hipblasDestroy(cublas_handle));
}
void tsvdInverseTransform(double *trans_input, double *components,
double *input, paramsTSVD prms) {
hipblasHandle_t cublas_handle;
CUBLAS_CHECK(hipblasCreate(&cublas_handle));
tsvdInverseTransform(trans_input, components, input, prms, cublas_handle);
CUBLAS_CHECK(hipblasDestroy(cublas_handle));
}
/** @} */
}; // end namespace ML
| a7901c92fd6dba35e187cb052ccbccd02e56f47a.cu | /*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "tsvd.h"
#include "tsvd_c.h"
namespace ML {
using namespace MLCommon;
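// Convenience overloads below: each creates the BLAS/solver handles, forwards to
// the handle-taking implementation, and destroys the handles afterwards.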
void tsvdFit(float *input, float *components, float *singular_vals, paramsTSVD prms) {
cublasHandle_t cublas_handle;
CUBLAS_CHECK(cublasCreate(&cublas_handle));
cusolverDnHandle_t cusolver_handle = NULL;
CUSOLVER_CHECK(cusolverDnCreate(&cusolver_handle));
tsvdFit(input, components, singular_vals, prms, cublas_handle, cusolver_handle);
CUBLAS_CHECK(cublasDestroy(cublas_handle));
CUSOLVER_CHECK(cusolverDnDestroy(cusolver_handle));
}
void tsvdFit(double *input, double *components, double *singular_vals, paramsTSVD prms) {
cublasHandle_t cublas_handle;
CUBLAS_CHECK(cublasCreate(&cublas_handle));
cusolverDnHandle_t cusolver_handle = NULL;
CUSOLVER_CHECK(cusolverDnCreate(&cusolver_handle));
tsvdFit(input, components, singular_vals, prms, cublas_handle, cusolver_handle);
CUBLAS_CHECK(cublasDestroy(cublas_handle));
CUSOLVER_CHECK(cusolverDnDestroy(cusolver_handle));
}
void tsvdFitTransform(float *input, float *trans_input, float *components,
float *explained_var, float *explained_var_ratio, float *singular_vals,
paramsTSVD prms) {
cublasHandle_t cublas_handle;
CUBLAS_CHECK(cublasCreate(&cublas_handle));
cusolverDnHandle_t cusolver_handle = NULL;
CUSOLVER_CHECK(cusolverDnCreate(&cusolver_handle));
tsvdFitTransform(input, trans_input, components, explained_var,
explained_var_ratio, singular_vals, prms, cublas_handle,
cusolver_handle);
CUBLAS_CHECK(cublasDestroy(cublas_handle));
CUSOLVER_CHECK(cusolverDnDestroy(cusolver_handle));
}
void tsvdFitTransform(double *input, double *trans_input, double *components,
double *explained_var, double *explained_var_ratio,
double *singular_vals, paramsTSVD prms) {
cublasHandle_t cublas_handle;
CUBLAS_CHECK(cublasCreate(&cublas_handle));
cusolverDnHandle_t cusolver_handle = NULL;
CUSOLVER_CHECK(cusolverDnCreate(&cusolver_handle));
tsvdFitTransform(input, trans_input, components, explained_var,
explained_var_ratio, singular_vals, prms, cublas_handle,
cusolver_handle);
CUBLAS_CHECK(cublasDestroy(cublas_handle));
CUSOLVER_CHECK(cusolverDnDestroy(cusolver_handle));
}
void tsvdTransform(float *input, float *components, float *trans_input,
paramsTSVD prms) {
cublasHandle_t cublas_handle;
CUBLAS_CHECK(cublasCreate(&cublas_handle));
tsvdTransform(input, components, trans_input, prms, cublas_handle);
CUBLAS_CHECK(cublasDestroy(cublas_handle));
}
void tsvdTransform(double *input, double *components, double *trans_input,
paramsTSVD prms) {
cublasHandle_t cublas_handle;
CUBLAS_CHECK(cublasCreate(&cublas_handle));
tsvdTransform(input, components, trans_input, prms, cublas_handle);
CUBLAS_CHECK(cublasDestroy(cublas_handle));
}
void tsvdInverseTransform(float *trans_input, float *components, float *input,
paramsTSVD prms) {
cublasHandle_t cublas_handle;
CUBLAS_CHECK(cublasCreate(&cublas_handle));
tsvdInverseTransform(trans_input, components, input, prms, cublas_handle);
CUBLAS_CHECK(cublasDestroy(cublas_handle));
}
void tsvdInverseTransform(double *trans_input, double *components,
double *input, paramsTSVD prms) {
cublasHandle_t cublas_handle;
CUBLAS_CHECK(cublasCreate(&cublas_handle));
tsvdInverseTransform(trans_input, components, input, prms, cublas_handle);
CUBLAS_CHECK(cublasDestroy(cublas_handle));
}
/** @} */
}; // end namespace ML
|
dbc196c57217891a10186ce89da1f8a7925e1f91.hip | // !!! This is a file automatically generated by hipify!!!
#include "util.cuh"
#include "cudense.h"
#include "kernels.h"
cudenseLayer cudenseLayer_init(int M, int N)
{
cudenseLayer dl; DENSEL_INIT(dl); dl.M=M; dl.N=N;
return dl;
}
void cudenseLayer_free(cudenseLayer *dl)
{
cuftens_free(&dl->W); cuftens_free(&dl->b);
cuftens_free(&dl->dW); cuftens_free(&dl->db);
cuftens_free(&dl->out); cuftens_free(&dl->in);
}
void cudenseLayer_convert(denseLayer *src, cudenseLayer *dst)
{
cudenseLayer_set(&src->W, dst);
}
void cudenseLayer_set(ftens *W, cudenseLayer *dl)
{
int M=W->M, N=W->N;
cudenseLayer_free(dl);
dl->M=M; dl->N=N; dl->W=cuftens_init(1, M, N, 1);
hipMemcpy(dl->W.data, W->data, W->bytes, cuHtoD);
}
void cudenseLayer_copy_input(cuftens *t, cudenseLayer *dl)
{
if (!dl->in.data)
dl->in = cuftens_init(t->D, t->M, t->N, t->L);
hipMemcpy(dl->in.data, t->data, t->bytes, cuHtoD);
}
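// Forward pass: flatten each input sample to a length-N vector and multiply it by the
// M x N weight matrix (sgemv for a single sample, sgemm for a batch).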
void cudenseLayer_forward(cuftens *t, cudenseLayer *dl, int save)
{
int D=t->D, M=t->M, N=t->N;
cuftens_reshape(t, D, 1, t->MNL, 1);
cuASSERT(t->MNL == dl->N, "err: cudense shape\n");
if (save) cudenseLayer_copy_input(t, dl);
if (!dl->out.data) dl->out=cuftens_init(D, 1, dl->M, 1);
if (D == 1) sgemv(&dl->W, t, &dl->out);
else sgemm(M, 1, N, t->data, N, dl->W.data, N,
dl->out.data, 1);
}
void cudenseLayer_backward(cuftens *dt, cudenseLayer *dl)
{
fprintf(stderr, "err: dense bprop not implemented yet\n");
exit(-2);
}
void cudenseLayer_print_size(cudenseLayer *dl)
{
printf("cudense: %d %d\n", dl->M, dl->N);
}
| dbc196c57217891a10186ce89da1f8a7925e1f91.cu | #include "util.cuh"
#include "cudense.h"
#include "kernels.h"
cudenseLayer cudenseLayer_init(int M, int N)
{
cudenseLayer dl; DENSEL_INIT(dl); dl.M=M; dl.N=N;
return dl;
}
void cudenseLayer_free(cudenseLayer *dl)
{
cuftens_free(&dl->W); cuftens_free(&dl->b);
cuftens_free(&dl->dW); cuftens_free(&dl->db);
cuftens_free(&dl->out); cuftens_free(&dl->in);
}
void cudenseLayer_convert(denseLayer *src, cudenseLayer *dst)
{
cudenseLayer_set(&src->W, dst);
}
void cudenseLayer_set(ftens *W, cudenseLayer *dl)
{
int M=W->M, N=W->N;
cudenseLayer_free(dl);
dl->M=M; dl->N=N; dl->W=cuftens_init(1, M, N, 1);
cudaMemcpy(dl->W.data, W->data, W->bytes, cuHtoD);
}
void cudenseLayer_copy_input(cuftens *t, cudenseLayer *dl)
{
if (!dl->in.data)
dl->in = cuftens_init(t->D, t->M, t->N, t->L);
cudaMemcpy(dl->in.data, t->data, t->bytes, cuHtoD);
}
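// Forward pass: flatten each input sample to a length-N vector and multiply it by the
// M x N weight matrix (sgemv for a single sample, sgemm for a batch).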
void cudenseLayer_forward(cuftens *t, cudenseLayer *dl, int save)
{
int D=t->D, M=t->M, N=t->N;
cuftens_reshape(t, D, 1, t->MNL, 1);
cuASSERT(t->MNL == dl->N, "err: cudense shape\n");
if (save) cudenseLayer_copy_input(t, dl);
if (!dl->out.data) dl->out=cuftens_init(D, 1, dl->M, 1);
if (D == 1) sgemv(&dl->W, t, &dl->out);
else sgemm(M, 1, N, t->data, N, dl->W.data, N,
dl->out.data, 1);
}
void cudenseLayer_backward(cuftens *dt, cudenseLayer *dl)
{
fprintf(stderr, "err: dense bprop not implemented yet\n");
exit(-2);
}
void cudenseLayer_print_size(cudenseLayer *dl)
{
printf("cudense: %d %d\n", dl->M, dl->N);
}
|
c9dd0f8e755b95e8b643ac525912a5717a5afd64.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#include <iostream>
#include <stdbool.h>
#include <string.h>
#include <time.h>
#include <math.h>
#include <inttypes.h>
#include <chrono>
#include "Functions.cu"
#include "sample_board.h"
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
int main(int argc, char* argv[]) {
uint64_t test[N];
memset(test,0,N*sizeof(uint64_t));
uint64_t *new_boards;
uint64_t *old_boards;
int *board_index;
const int sk = pow(2,27);
gpuErrchk(hipMallocManaged(&new_boards,sk*sizeof(uint64_t)));
gpuErrchk(hipMallocManaged(&old_boards,sk*sizeof(uint64_t)));
gpuErrchk(hipMallocManaged(&board_index,sizeof(int)));
int host_count;
int maxBlocks;
int zeros;
params_t params;
setup_board(test,sample_board);
print_sudoku(test);
zeros=count_zeros(test);
gpuErrchk(hipMemcpy(new_boards,test,N*sizeof(uint64_t),hipMemcpyHostToDevice));
auto start = std::chrono::high_resolution_clock::now();
params=find_empty_index(test,0,0);
printf("Init index (%i, %i)\n", params.row, params.col);
hipLaunchKernelGGL(( cudaBFSSudoku), dim3(1),dim3(N), 0, 0, new_boards, old_boards, 1, board_index, params.row, params.col);
params=find_empty_index(old_boards, params.row, params.col);
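// BFS expansion: each iteration fills the next empty cell, ping-ponging the
// candidate boards between old_boards and new_boards.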
for (int i = 0; i<zeros; i++) {
gpuErrchk(hipMemcpy(&host_count, board_index, sizeof(int), hipMemcpyDeviceToHost)); // also syncs with the previous launch
printf("total boards after an iteration %d: %d\n", i, host_count);
*board_index = 0;
maxBlocks=(N*host_count+256-1)/256;
if (i % 2 == 0) {
hipLaunchKernelGGL(( cudaBFSSudoku), dim3(maxBlocks),dim3(256), 0, 0, old_boards, new_boards, host_count, board_index, params.row, params.col);
params=find_empty_index(new_boards, params.row, params.col);
}
else {
hipLaunchKernelGGL(( cudaBFSSudoku), dim3(maxBlocks),dim3(256), 0, 0, new_boards, old_boards, host_count, board_index, params.row, params.col);
params=find_empty_index(old_boards, params.row, params.col);
}
}
//gpuErrchk(hipMemcpy(&host_count, board_index, sizeof(int), hipMemcpyDeviceToHost));
if(zeros % 2 == 0){ // if odd number of iterations run, then send it old boards not new boards;
print_sudoku(new_boards);
}
else{
print_sudoku(old_boards);
}
gpuErrchk(hipDeviceSynchronize());
auto stop = std::chrono::high_resolution_clock::now();
auto duration = std::chrono::duration_cast<std::chrono::microseconds>(stop - start);
std::cout << duration.count() << std::endl;
gpuErrchk(hipFree(new_boards));
gpuErrchk(hipFree(old_boards));
gpuErrchk(hipFree(board_index));
return 0;
}
| c9dd0f8e755b95e8b643ac525912a5717a5afd64.cu | #include <stdlib.h>
#include <stdio.h>
#include <iostream>
#include <stdbool.h>
#include <string.h>
#include <time.h>
#include <math.h>
#include <inttypes.h>
#include <chrono>
#include "Functions.cu"
#include "sample_board.h"
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
int main(int argc, char* argv[]) {
uint64_t test[N];
memset(test,0,N*sizeof(uint64_t));
uint64_t *new_boards;
uint64_t *old_boards;
int *board_index;
const int sk = pow(2,27);
gpuErrchk(cudaMallocManaged(&new_boards,sk*sizeof(uint64_t)));
gpuErrchk(cudaMallocManaged(&old_boards,sk*sizeof(uint64_t)));
gpuErrchk(cudaMallocManaged(&board_index,sizeof(int)));
int host_count;
int maxBlocks;
int zeros;
params_t params;
setup_board(test,sample_board);
print_sudoku(test);
zeros=count_zeros(test);
gpuErrchk(cudaMemcpy(new_boards,test,N*sizeof(uint64_t),cudaMemcpyHostToDevice));
auto start = std::chrono::high_resolution_clock::now();
params=find_empty_index(test,0,0);
printf("Init index (%i, %i)\n", params.row, params.col);
cudaBFSSudoku<<<1,N>>>(new_boards, old_boards, 1, board_index, params.row, params.col);
params=find_empty_index(old_boards, params.row, params.col);
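// BFS expansion: each iteration fills the next empty cell, ping-ponging the
// candidate boards between old_boards and new_boards.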
for (int i = 0; i<zeros; i++) {
gpuErrchk(cudaMemcpy(&host_count, board_index, sizeof(int), cudaMemcpyDeviceToHost)); // also syncs with the previous launch
printf("total boards after an iteration %d: %d\n", i, host_count);
*board_index = 0;
maxBlocks=(N*host_count+256-1)/256;
if (i % 2 == 0) {
cudaBFSSudoku<<<maxBlocks,256>>>(old_boards, new_boards, host_count, board_index, params.row, params.col);
params=find_empty_index(new_boards, params.row, params.col);
}
else {
cudaBFSSudoku<<<maxBlocks,256>>>(new_boards, old_boards, host_count, board_index, params.row, params.col);
params=find_empty_index(old_boards, params.row, params.col);
}
}
//gpuErrchk(cudaMemcpy(&host_count, board_index, sizeof(int), cudaMemcpyDeviceToHost));
if(zeros % 2 == 0){ // if odd number of iterations run, then send it old boards not new boards;
print_sudoku(new_boards);
}
else{
print_sudoku(old_boards);
}
gpuErrchk(cudaDeviceSynchronize());
auto stop = std::chrono::high_resolution_clock::now();
auto duration = std::chrono::duration_cast<std::chrono::microseconds>(stop - start);
std::cout << duration.count() << std::endl;
gpuErrchk(cudaFree(new_boards));
gpuErrchk(cudaFree(old_boards));
gpuErrchk(cudaFree(board_index));
return 0;
}
|
51a8df64bb2e8100beafa8cb74388aeac6225790.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <conio.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
__global__ void what_is_my_id_2d_A( unsigned int * const block_x, unsigned int * const block_y, unsigned int * const thread, unsigned int * const calc_thread, unsigned int * const x_thread, unsigned int * const y_thread, unsigned int * const grid_dimx, unsigned int * const block_dimx, unsigned int * const grid_dimy, unsigned int * const block_dimy) {
const unsigned int idx =(blockIdx.x * blockDim.x) +threadIdx.x;
const unsigned int idy =(blockIdx.y * blockDim.y) +threadIdx.y;
const unsigned int thread_idx =((gridDim.x * blockDim.x) * idy) +idx;
block_x[thread_idx] =blockIdx.x;
block_y[thread_idx] =blockIdx.y;
thread[thread_idx] =threadIdx.x;
calc_thread[thread_idx] =thread_idx;
x_thread[thread_idx] =idx;
y_thread[thread_idx] =idy;
grid_dimx[thread_idx] =gridDim.x;
block_dimx[thread_idx] =blockDim.x;
grid_dimy[thread_idx] =gridDim.y;
block_dimy[thread_idx] =blockDim.y;
}
#define ARRAY_SIZE_X 32
#define ARRAY_SIZE_Y 16
#define ARRAY_SIZE_IN_BYTES ((ARRAY_SIZE_X) * (ARRAY_SIZE_Y) * (sizeof(unsigned int))) /* Statically declare the per-thread output arrays, each ARRAY_SIZE_Y by ARRAY_SIZE_X */
unsigned int cpu_block_x[ARRAY_SIZE_Y][ARRAY_SIZE_X];
unsigned int cpu_block_y[ARRAY_SIZE_Y][ARRAY_SIZE_X];
unsigned int cpu_thread[ARRAY_SIZE_Y][ARRAY_SIZE_X];
unsigned int cpu_warp[ARRAY_SIZE_Y][ARRAY_SIZE_X];
unsigned int cpu_calc_thread[ARRAY_SIZE_Y][ARRAY_SIZE_X];
unsigned int cpu_xthread[ARRAY_SIZE_Y][ARRAY_SIZE_X];
unsigned int cpu_ythread[ARRAY_SIZE_Y][ARRAY_SIZE_X];
unsigned int cpu_grid_dimx[ARRAY_SIZE_Y][ARRAY_SIZE_X];
unsigned int cpu_block_dimx[ARRAY_SIZE_Y][ARRAY_SIZE_X];
unsigned int cpu_grid_dimy[ARRAY_SIZE_Y][ARRAY_SIZE_X];
unsigned int cpu_block_dimy[ARRAY_SIZE_Y][ARRAY_SIZE_X];
int main(void) {
/* Total thread count =32 * 4 =128 */
const dim3 threads_rect(32, 4); /* 32 * 4 */
const dim3 blocks_rect(1,4); /* Total thread count =16 * 8 =128 */
const dim3 threads_square(16, 8); /* 16 * 8 */
const dim3 blocks_square(2,2); /* Needed to wait for a character at exit */
char ch; /* Declare pointers for GPU based params */
unsigned int * gpu_block_x;
unsigned int * gpu_block_y;
unsigned int * gpu_thread;
unsigned int * gpu_warp;
unsigned int * gpu_calc_thread;
unsigned int * gpu_xthread;
unsigned int * gpu_ythread;
unsigned int * gpu_grid_dimx;
unsigned int * gpu_block_dimx;
unsigned int * gpu_grid_dimy;
unsigned int * gpu_block_dimy; /* Allocate the output arrays on the GPU */
hipMalloc((void **)&gpu_block_x, ARRAY_SIZE_IN_BYTES);
hipMalloc((void **)&gpu_block_y, ARRAY_SIZE_IN_BYTES);
hipMalloc((void **)&gpu_thread, ARRAY_SIZE_IN_BYTES);
hipMalloc((void **)&gpu_calc_thread, ARRAY_SIZE_IN_BYTES);
hipMalloc((void **)&gpu_xthread, ARRAY_SIZE_IN_BYTES);
hipMalloc((void **)&gpu_ythread, ARRAY_SIZE_IN_BYTES);
hipMalloc((void **)&gpu_grid_dimx, ARRAY_SIZE_IN_BYTES);
hipMalloc((void **)&gpu_block_dimx, ARRAY_SIZE_IN_BYTES);
hipMalloc((void **)&gpu_grid_dimy, ARRAY_SIZE_IN_BYTES);
hipMalloc((void **)&gpu_block_dimy, ARRAY_SIZE_IN_BYTES);
for (int kernel=0; kernel < 2; kernel++) {
switch (kernel) {
case 0: {
/* Execute our kernel */hipLaunchKernelGGL(( what_is_my_id_2d_A), dim3(blocks_rect), dim3(threads_rect), 0, 0, gpu_block_x, gpu_block_y,gpu_thread, gpu_calc_thread, gpu_xthread, gpu_ythread, gpu_grid_dimx, gpu_block_dimx,gpu_grid_dimy, gpu_block_dimy);
}
break;
case 1: {
/* Execute our kernel */hipLaunchKernelGGL(( what_is_my_id_2d_A), dim3(blocks_square), dim3(threads_square), 0, 0, gpu_block_x, gpu_block_y,gpu_thread, gpu_calc_thread, gpu_xthread, gpu_ythread, gpu_grid_dimx, gpu_block_dimx,gpu_grid_dimy, gpu_block_dimy);
}
break;
default:
exit(1);
break;
} /* Copy back the gpu results to the CPU */
hipMemcpy(cpu_block_x, gpu_block_x, ARRAY_SIZE_IN_BYTES,hipMemcpyDeviceToHost);
hipMemcpy(cpu_block_y, gpu_block_y, ARRAY_SIZE_IN_BYTES,hipMemcpyDeviceToHost);
hipMemcpy(cpu_thread, gpu_thread, ARRAY_SIZE_IN_BYTES,hipMemcpyDeviceToHost);
hipMemcpy(cpu_calc_thread, gpu_calc_thread, ARRAY_SIZE_IN_BYTES,hipMemcpyDeviceToHost);
hipMemcpy(cpu_xthread, gpu_xthread, ARRAY_SIZE_IN_BYTES,hipMemcpyDeviceToHost);
hipMemcpy(cpu_ythread, gpu_ythread, ARRAY_SIZE_IN_BYTES,hipMemcpyDeviceToHost);
hipMemcpy(cpu_grid_dimx, gpu_grid_dimx, ARRAY_SIZE_IN_BYTES,hipMemcpyDeviceToHost);
hipMemcpy(cpu_block_dimx,gpu_block_dimx, ARRAY_SIZE_IN_BYTES,hipMemcpyDeviceToHost);
hipMemcpy(cpu_grid_dimy, gpu_grid_dimy, ARRAY_SIZE_IN_BYTES,hipMemcpyDeviceToHost);
hipMemcpy(cpu_block_dimy, gpu_block_dimy, ARRAY_SIZE_IN_BYTES,hipMemcpyDeviceToHost);
printf("\nKernel %d\n", kernel); /* Iterate through the arrays and print */
for (int y=0; y < ARRAY_SIZE_Y; y++) {
for (int x=0; x < ARRAY_SIZE_X; x++) {
printf("CT: %2u BKX: %1u BKY: %1u TID: %2u YTID: %2u XTID: %2u GDX: %1u BDX: %1u GDY %1u BDY %1u\n", cpu_calc_thread[y][x], cpu_block_x[y][x], cpu_block_y[y][x],cpu_thread[y][x], cpu_ythread[y][x], cpu_xthread[y][x], cpu_grid_dimx[y][x],cpu_block_dimx[y][x], cpu_grid_dimy[y][x], cpu_block_dimy[y][x]);
/* Wait for any key so we can see the console window */
ch =getch();
}
} /* Wait for any key so we can see the console window */
printf("Press any key to continue\n");
ch =getch();
} /* Free the arrays on the GPU as now we're done with them */
hipFree(gpu_block_x);
hipFree(gpu_block_y);
hipFree(gpu_thread);
hipFree(gpu_calc_thread);
hipFree(gpu_xthread);
hipFree(gpu_ythread);
hipFree(gpu_grid_dimx);
hipFree(gpu_block_dimx);
hipFree(gpu_grid_dimy);
hipFree(gpu_block_dimy);
} | 51a8df64bb2e8100beafa8cb74388aeac6225790.cu | #include <stdio.h>
#include <stdlib.h>
#include <conio.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
__global__ void what_is_my_id_2d_A( unsigned int * const block_x, unsigned int * const block_y, unsigned int * const thread, unsigned int * const calc_thread, unsigned int * const x_thread, unsigned int * const y_thread, unsigned int * const grid_dimx, unsigned int * const block_dimx, unsigned int * const grid_dimy, unsigned int * const block_dimy) {
const unsigned int idx =(blockIdx.x * blockDim.x) +threadIdx.x;
const unsigned int idy =(blockIdx.y * blockDim.y) +threadIdx.y;
const unsigned int thread_idx =((gridDim.x * blockDim.x) * idy) +idx;
block_x[thread_idx] =blockIdx.x;
block_y[thread_idx] =blockIdx.y;
thread[thread_idx] =threadIdx.x;
calc_thread[thread_idx] =thread_idx;
x_thread[thread_idx] =idx;
y_thread[thread_idx] =idy;
grid_dimx[thread_idx] =gridDim.x;
block_dimx[thread_idx] =blockDim.x;
grid_dimy[thread_idx] =gridDim.y;
block_dimy[thread_idx] =blockDim.y;
}
#define ARRAY_SIZE_X 32
#define ARRAY_SIZE_Y 16
#define ARRAY_SIZE_IN_BYTES ((ARRAY_SIZE_X) * (ARRAY_SIZE_Y) * (sizeof(unsigned int))) /* Statically declare the per-thread output arrays, each ARRAY_SIZE_Y by ARRAY_SIZE_X */
unsigned int cpu_block_x[ARRAY_SIZE_Y][ARRAY_SIZE_X];
unsigned int cpu_block_y[ARRAY_SIZE_Y][ARRAY_SIZE_X];
unsigned int cpu_thread[ARRAY_SIZE_Y][ARRAY_SIZE_X];
unsigned int cpu_warp[ARRAY_SIZE_Y][ARRAY_SIZE_X];
unsigned int cpu_calc_thread[ARRAY_SIZE_Y][ARRAY_SIZE_X];
unsigned int cpu_xthread[ARRAY_SIZE_Y][ARRAY_SIZE_X];
unsigned int cpu_ythread[ARRAY_SIZE_Y][ARRAY_SIZE_X];
unsigned int cpu_grid_dimx[ARRAY_SIZE_Y][ARRAY_SIZE_X];
unsigned int cpu_block_dimx[ARRAY_SIZE_Y][ARRAY_SIZE_X];
unsigned int cpu_grid_dimy[ARRAY_SIZE_Y][ARRAY_SIZE_X];
unsigned int cpu_block_dimy[ARRAY_SIZE_Y][ARRAY_SIZE_X];
int main(void) {
/* Total thread count =32 * 4 =128 */
const dim3 threads_rect(32, 4); /* 32 * 4 */
const dim3 blocks_rect(1,4); /* Total thread count =16 * 8 =128 */
const dim3 threads_square(16, 8); /* 16 * 8 */
const dim3 blocks_square(2,2); /* Needed to wait for a character at exit */
char ch; /* Declare pointers for GPU based params */
unsigned int * gpu_block_x;
unsigned int * gpu_block_y;
unsigned int * gpu_thread;
unsigned int * gpu_warp;
unsigned int * gpu_calc_thread;
unsigned int * gpu_xthread;
unsigned int * gpu_ythread;
unsigned int * gpu_grid_dimx;
unsigned int * gpu_block_dimx;
unsigned int * gpu_grid_dimy;
unsigned int * gpu_block_dimy; /* Allocate the output arrays on the GPU */
cudaMalloc((void **)&gpu_block_x, ARRAY_SIZE_IN_BYTES);
cudaMalloc((void **)&gpu_block_y, ARRAY_SIZE_IN_BYTES);
cudaMalloc((void **)&gpu_thread, ARRAY_SIZE_IN_BYTES);
cudaMalloc((void **)&gpu_calc_thread, ARRAY_SIZE_IN_BYTES);
cudaMalloc((void **)&gpu_xthread, ARRAY_SIZE_IN_BYTES);
cudaMalloc((void **)&gpu_ythread, ARRAY_SIZE_IN_BYTES);
cudaMalloc((void **)&gpu_grid_dimx, ARRAY_SIZE_IN_BYTES);
cudaMalloc((void **)&gpu_block_dimx, ARRAY_SIZE_IN_BYTES);
cudaMalloc((void **)&gpu_grid_dimy, ARRAY_SIZE_IN_BYTES);
cudaMalloc((void **)&gpu_block_dimy, ARRAY_SIZE_IN_BYTES);
for (int kernel=0; kernel < 2; kernel++) {
switch (kernel) {
case 0: {
/* Execute our kernel */ what_is_my_id_2d_A<<<blocks_rect, threads_rect>>>(gpu_block_x, gpu_block_y,gpu_thread, gpu_calc_thread, gpu_xthread, gpu_ythread, gpu_grid_dimx, gpu_block_dimx,gpu_grid_dimy, gpu_block_dimy);
}
break;
case 1: {
/* Execute our kernel */ what_is_my_id_2d_A<<<blocks_square, threads_square>>>(gpu_block_x, gpu_block_y,gpu_thread, gpu_calc_thread, gpu_xthread, gpu_ythread, gpu_grid_dimx, gpu_block_dimx,gpu_grid_dimy, gpu_block_dimy);
}
break;
default:
exit(1);
break;
} /* Copy back the gpu results to the CPU */
cudaMemcpy(cpu_block_x, gpu_block_x, ARRAY_SIZE_IN_BYTES,cudaMemcpyDeviceToHost);
cudaMemcpy(cpu_block_y, gpu_block_y, ARRAY_SIZE_IN_BYTES,cudaMemcpyDeviceToHost);
cudaMemcpy(cpu_thread, gpu_thread, ARRAY_SIZE_IN_BYTES,cudaMemcpyDeviceToHost);
cudaMemcpy(cpu_calc_thread, gpu_calc_thread, ARRAY_SIZE_IN_BYTES,cudaMemcpyDeviceToHost);
cudaMemcpy(cpu_xthread, gpu_xthread, ARRAY_SIZE_IN_BYTES,cudaMemcpyDeviceToHost);
cudaMemcpy(cpu_ythread, gpu_ythread, ARRAY_SIZE_IN_BYTES,cudaMemcpyDeviceToHost);
cudaMemcpy(cpu_grid_dimx, gpu_grid_dimx, ARRAY_SIZE_IN_BYTES,cudaMemcpyDeviceToHost);
cudaMemcpy(cpu_block_dimx,gpu_block_dimx, ARRAY_SIZE_IN_BYTES,cudaMemcpyDeviceToHost);
cudaMemcpy(cpu_grid_dimy, gpu_grid_dimy, ARRAY_SIZE_IN_BYTES,cudaMemcpyDeviceToHost);
cudaMemcpy(cpu_block_dimy, gpu_block_dimy, ARRAY_SIZE_IN_BYTES,cudaMemcpyDeviceToHost);
printf("\nKernel %d\n", kernel); /* Iterate through the arrays and print */
for (int y=0; y < ARRAY_SIZE_Y; y++) {
for (int x=0; x < ARRAY_SIZE_X; x++) {
printf("CT: %2u BKX: %1u BKY: %1u TID: %2u YTID: %2u XTID: %2u GDX: %1u BDX: %1u GDY %1u BDY %1u\n", cpu_calc_thread[y][x], cpu_block_x[y][x], cpu_block_y[y][x],cpu_thread[y][x], cpu_ythread[y][x], cpu_xthread[y][x], cpu_grid_dimx[y][x],cpu_block_dimx[y][x], cpu_grid_dimy[y][x], cpu_block_dimy[y][x]);
/* Wait for any key so we can see the console window */
ch =getch();
}
} /* Wait for any key so we can see the console window */
printf("Press any key to continue\n");
ch =getch();
} /* Free the arrays on the GPU as now we're done with them */
cudaFree(gpu_block_x);
cudaFree(gpu_block_y);
cudaFree(gpu_thread);
cudaFree(gpu_calc_thread);
cudaFree(gpu_xthread);
cudaFree(gpu_ythread);
cudaFree(gpu_grid_dimx);
cudaFree(gpu_block_dimx);
cudaFree(gpu_grid_dimy);
cudaFree(gpu_block_dimy);
} |
7a0dc242a5d661a802011cd41fe34cf06d5b9d04.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/Context.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/Dispatch.h>
#include <ATen/NativeFunctions.h>
#include <ATen/hip/PinnedMemoryAllocator.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/native/LinearAlgebraUtils.h>
#include <ATen/native/hip/MiscUtils.h>
#include <ATen/native/Resize.h>
#include <ATen/native/BatchLinearAlgebra.h>
#include <ATen/native/hip/BatchLinearAlgebraLib.h>
#include <ATen/native/cpu/zmath.h>
#include <THH/THH.h> // for USE_MAGMA
#ifdef USE_MAGMA
#include <magma_types.h>
#include <magma_v2.h>
const bool use_magma_ = true;
#else
const bool use_magma_ = false;
#endif
namespace at {
namespace native {
#ifdef USE_MAGMA
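// Thin wrappers around MAGMA routines; each template below is specialized for
// float, double, c10::complex<float> and c10::complex<double>.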
template<class scalar_t>
void magmaSolve(
magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda,
magma_int_t* ipiv, scalar_t* dB, magma_int_t lddb, magma_int_t* info);
template<class scalar_t>
void magmaSolveBatched(
magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, scalar_t** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaLu(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info);
template<class scalar_t>
void magmaLuBatched(
magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaLuNoPiv(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
magma_int_t* info);
template<class scalar_t>
void magmaLuNoPivBatched(
magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
inline magma_int_t magmaGetriOptimalBlocksize(magma_int_t n);
template<class scalar_t>
void magmaGetri(
magma_int_t n, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv, scalar_t* dwork,
magma_int_t lwork, magma_int_t* info);
template<class scalar_t>
void magmaGetriBatched(
magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, scalar_t** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaCholeskySolve(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda,
scalar_t* dB, magma_int_t lddb, magma_int_t* info);
template<class scalar_t>
void magmaCholeskySolveBatched(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda,
scalar_t** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaCholesky(
magma_uplo_t uplo, magma_int_t n, scalar_t* dA,
magma_int_t ldda, magma_int_t* info);
template<class scalar_t>
void magmaCholeskyBatched(
magma_uplo_t uplo, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaTriangularSolveBatched(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
scalar_t** dA_array, magma_int_t ldda, scalar_t** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue);
template<class scalar_t>
inline magma_int_t magmaGeqrfOptimalBlocksize(magma_int_t m, magma_int_t n);
template<class scalar_t>
void magmaGeqrf(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
scalar_t* tau, scalar_t* dT, magma_int_t* info, bool is_v2);
template<class scalar_t>
void magmaOrgqr(
magma_int_t m, magma_int_t n, magma_int_t k, scalar_t* dA,
magma_int_t ldda, scalar_t* tau, scalar_t* dT, magma_int_t nb, magma_int_t* info);
template<class scalar_t, class value_t=scalar_t>
void magmaSyevd(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, scalar_t* dA, magma_int_t ldda,
value_t* w, scalar_t* wA, magma_int_t ldwa, scalar_t* work, magma_int_t lwork, value_t* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info);
template<class scalar_t, class value_t=scalar_t>
void magmaEig(
magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n, scalar_t *A, magma_int_t lda,
scalar_t *w, scalar_t *VL, magma_int_t ldvl,
scalar_t *VR, magma_int_t ldvr, scalar_t *work, magma_int_t lwork,
value_t *rwork,
magma_int_t *info);
template<class scalar_t, class value_t=scalar_t>
void magmaSvd(
magma_vec_t jobz, magma_int_t m, magma_int_t n, scalar_t* A,
magma_int_t lda, value_t* s, scalar_t* U, magma_int_t ldu,
scalar_t* VT, magma_int_t ldvt, scalar_t* work, magma_int_t lwork,
value_t* rwork,
magma_int_t* iwork, magma_int_t* info);
template<class scalar_t>
void magmaLuSolve(
magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv,
scalar_t* dB, magma_int_t lddb, magma_int_t* info);
template<class scalar_t>
void magmaLuSolveBatched(
magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
scalar_t** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaGels(
magma_trans_t trans, magma_int_t m, magma_int_t n, magma_int_t nrhs,
scalar_t* dA, magma_int_t ldda, scalar_t* dB, magma_int_t lddb,
scalar_t* hwork, magma_int_t lwork, magma_int_t* info);
template<>
void magmaSolve<double>(
magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda,
magma_int_t* ipiv, double* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSolve<float>(
magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda,
magma_int_t* ipiv, float* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSolve<c10::complex<double>>(
magma_int_t n, magma_int_t nrhs, c10::complex<double>* dA, magma_int_t ldda,
magma_int_t* ipiv, c10::complex<double>* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgesv_gpu(n, nrhs,
reinterpret_cast<magmaDoubleComplex*>(dA), ldda, ipiv,
reinterpret_cast<magmaDoubleComplex*>(dB), lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSolve<c10::complex<float>>(
magma_int_t n, magma_int_t nrhs, c10::complex<float>* dA, magma_int_t ldda,
magma_int_t* ipiv, c10::complex<float>* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgesv_gpu(n, nrhs,
reinterpret_cast<magmaFloatComplex*>(dA), ldda, ipiv,
reinterpret_cast<magmaFloatComplex*>(dB), lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSolveBatched<double>(
magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, double** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_dgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSolveBatched<float>(
magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, float** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_sgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSolveBatched<c10::complex<double>>(
magma_int_t n, magma_int_t nrhs, c10::complex<double>** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, c10::complex<double>** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_zgesv_batched(n, nrhs,
reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, dipiv_array,
reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, dinfo_array, batch_count, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSolveBatched<c10::complex<float>>(
magma_int_t n, magma_int_t nrhs, c10::complex<float>** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, c10::complex<float>** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_cgesv_batched(n, nrhs,
reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, dipiv_array,
reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, dinfo_array, batch_count, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLu<double>(
magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgetrf_gpu(m, n, dA, ldda, ipiv, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLu<float>(
magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgetrf_gpu(m, n, dA, ldda, ipiv, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLu<c10::complex<double>>(
magma_int_t m, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgetrf_gpu(m, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, ipiv, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLu<c10::complex<float>>(
magma_int_t m, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgetrf_gpu(m, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, ipiv, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuBatched<double>(
magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_dgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuBatched<float>(
magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_sgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuBatched<c10::complex<double>>(
magma_int_t m, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_zgetrf_batched(m, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuBatched<c10::complex<float>>(
magma_int_t m, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_cgetrf_batched(m, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPiv<double>(
magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgetrf_nopiv_gpu(m, n, dA, ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPiv<float>(
magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgetrf_nopiv_gpu(m, n, dA, ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPiv<c10::complex<double>>(
magma_int_t m, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgetrf_nopiv_gpu(m, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPiv<c10::complex<float>>(
magma_int_t m, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgetrf_nopiv_gpu(m, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPivBatched<double>(
magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dgetrf_nopiv_batched(m, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPivBatched<float>(
magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_sgetrf_nopiv_batched(m, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPivBatched<c10::complex<double>>(
magma_int_t m, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_zgetrf_nopiv_batched(m, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPivBatched<c10::complex<float>>(
magma_int_t m, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_cgetrf_nopiv_batched(m, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
inline magma_int_t magmaGetriOptimalBlocksize<double>(magma_int_t n) {
return magma_get_dgetri_nb(n);
}
template<>
inline magma_int_t magmaGetriOptimalBlocksize<float>(magma_int_t n) {
return magma_get_sgetri_nb(n);
}
template <>
inline magma_int_t magmaGetriOptimalBlocksize<c10::complex<double>>(
magma_int_t n) {
return magma_get_zgetri_nb(n);
}
template <>
inline magma_int_t magmaGetriOptimalBlocksize<c10::complex<float>>(
magma_int_t n) {
return magma_get_cgetri_nb(n);
}
template<>
void magmaGetri<double>(
magma_int_t n, double* dA, magma_int_t ldda, magma_int_t* ipiv, double* dwork,
magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgetri_gpu(n, dA, ldda, ipiv, dwork, lwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaGetri<float>(
magma_int_t n, float* dA, magma_int_t ldda, magma_int_t* ipiv, float* dwork,
magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgetri_gpu(n, dA, ldda, ipiv, dwork, lwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template <>
void magmaGetri<c10::complex<double>>(
magma_int_t n,
c10::complex<double>* dA,
magma_int_t ldda,
magma_int_t* ipiv,
c10::complex<double>* dwork,
magma_int_t lwork,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgetri_gpu(
n,
reinterpret_cast<magmaDoubleComplex*>(dA),
ldda,
ipiv,
reinterpret_cast<magmaDoubleComplex*>(dwork),
lwork,
info);
AT_CUDA_CHECK(hipGetLastError());
}
template <>
void magmaGetri<c10::complex<float>>(
magma_int_t n,
c10::complex<float>* dA,
magma_int_t ldda,
magma_int_t* ipiv,
c10::complex<float>* dwork,
magma_int_t lwork,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgetri_gpu(
n,
reinterpret_cast<magmaFloatComplex*>(dA),
ldda,
ipiv,
reinterpret_cast<magmaFloatComplex*>(dwork),
lwork,
info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaGetriBatched<double>(
magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, double** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaGetriBatched<float>(
magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, float** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_sgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template <>
void magmaGetriBatched<c10::complex<double>>(
magma_int_t n,
c10::complex<double>** dA_array,
magma_int_t ldda,
magma_int_t** ipiv_array,
c10::complex<double>** dinvA_array,
magma_int_t lddia,
magma_int_t* info_array,
magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_zgetri_outofplace_batched(
n,
reinterpret_cast<magmaDoubleComplex**>(dA_array),
ldda,
ipiv_array,
reinterpret_cast<magmaDoubleComplex**>(dinvA_array),
lddia,
info_array,
batchsize,
magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template <>
void magmaGetriBatched<c10::complex<float>>(
magma_int_t n,
c10::complex<float>** dA_array,
magma_int_t ldda,
magma_int_t** ipiv_array,
c10::complex<float>** dinvA_array,
magma_int_t lddia,
magma_int_t* info_array,
magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_cgetri_outofplace_batched(
n,
reinterpret_cast<magmaFloatComplex**>(dA_array),
ldda,
ipiv_array,
reinterpret_cast<magmaFloatComplex**>(dinvA_array),
lddia,
info_array,
batchsize,
magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskySolve<double>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda,
double* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dpotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskySolve<float>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda,
float* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_spotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskySolve<c10::complex<double>>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<double>* dA, magma_int_t ldda,
c10::complex<double>* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zpotrs_gpu(uplo, n, nrhs,
reinterpret_cast<magmaDoubleComplex*>(dA), ldda,
reinterpret_cast<magmaDoubleComplex*>(dB), lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskySolve<c10::complex<float>>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<float>* dA, magma_int_t ldda,
c10::complex<float>* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cpotrs_gpu(uplo, n, nrhs,
reinterpret_cast<magmaFloatComplex*>(dA), ldda,
reinterpret_cast<magmaFloatComplex*>(dB), lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskySolveBatched<double>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda,
double** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_dpotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskySolveBatched<float>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda,
float** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_spotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskySolveBatched<c10::complex<double>>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<double>** dA_array, magma_int_t ldda,
c10::complex<double>** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_zpotrs_batched(uplo, n, nrhs,
reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda,
reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskySolveBatched<c10::complex<float>>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<float>** dA_array, magma_int_t ldda,
c10::complex<float>** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_cpotrs_batched(uplo, n, nrhs,
reinterpret_cast<magmaFloatComplex**>(dA_array), ldda,
reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholesky<double>(
magma_uplo_t uplo, magma_int_t n, double* dA,
magma_int_t ldda, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dpotrf_gpu(uplo, n, dA, ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholesky<float>(
magma_uplo_t uplo, magma_int_t n, float* dA,
magma_int_t ldda, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_spotrf_gpu(uplo, n, dA, ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholesky<c10::complex<double>>(
magma_uplo_t uplo, magma_int_t n, c10::complex<double>* dA,
magma_int_t ldda, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zpotrf_gpu(uplo, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholesky<c10::complex<float>>(
magma_uplo_t uplo, magma_int_t n, c10::complex<float>* dA,
magma_int_t ldda, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cpotrf_gpu(uplo, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskyBatched<double>(
magma_uplo_t uplo, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dpotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskyBatched<float>(
magma_uplo_t uplo, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_spotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskyBatched<c10::complex<double>>(
magma_uplo_t uplo, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_zpotrf_batched(uplo, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskyBatched<c10::complex<float>>(
magma_uplo_t uplo, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_cpotrf_batched(uplo, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaTriangularSolveBatched<double>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
double** dA_array, magma_int_t ldda, double** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magmablas_dtrsm_batched(MagmaLeft, uplo, trans, diag, m, n, 1, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaTriangularSolveBatched<float>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
float** dA_array, magma_int_t ldda, float** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magmablas_strsm_batched(MagmaLeft, uplo, trans, diag, m, n, 1, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaTriangularSolveBatched<c10::complex<double>>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
c10::complex<double>** dA_array, magma_int_t ldda, c10::complex<double>** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magmaDoubleComplex alpha({1, 0});
magmablas_ztrsm_batched(MagmaLeft, uplo, trans, diag, m, n, alpha,
reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda,
reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaTriangularSolveBatched<c10::complex<float>>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
c10::complex<float>** dA_array, magma_int_t ldda, c10::complex<float>** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magmaFloatComplex alpha({1, 0});
magmablas_ctrsm_batched(MagmaLeft, uplo, trans, diag, m, n, alpha,
reinterpret_cast<magmaFloatComplex**>(dA_array), ldda,
reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
inline magma_int_t magmaGeqrfOptimalBlocksize<double>(magma_int_t m, magma_int_t n) {
return magma_get_dgeqrf_nb(m, n);
}
template<>
inline magma_int_t magmaGeqrfOptimalBlocksize<float>(magma_int_t m, magma_int_t n) {
return magma_get_sgeqrf_nb(m, n);
}
template <>
inline magma_int_t magmaGeqrfOptimalBlocksize<c10::complex<double>>(
magma_int_t m,
magma_int_t n) {
return magma_get_zgeqrf_nb(m, n);
}
template <>
inline magma_int_t magmaGeqrfOptimalBlocksize<c10::complex<float>>(
magma_int_t m,
magma_int_t n) {
return magma_get_cgeqrf_nb(m, n);
}
template<>
void magmaGeqrf<double>(
magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
double* tau, double* dT, magma_int_t* info, bool is_v2) {
MagmaStreamSyncGuard guard;
if (!is_v2) {
magma_dgeqrf_gpu(m, n, dA, ldda, tau, dT, info);
} else {
magma_dgeqrf2_gpu(m, n, dA, ldda, tau, info);
}
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaGeqrf<float>(
magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
float* tau, float* dT, magma_int_t* info, bool is_v2) {
MagmaStreamSyncGuard guard;
if (!is_v2) {
magma_sgeqrf_gpu(m, n, dA, ldda, tau, dT, info);
} else {
magma_sgeqrf2_gpu(m, n, dA, ldda, tau, info);
}
AT_CUDA_CHECK(hipGetLastError());
}
template <>
void magmaGeqrf<c10::complex<double>>(
magma_int_t m,
magma_int_t n,
c10::complex<double>* dA,
magma_int_t ldda,
c10::complex<double>* tau,
c10::complex<double>* dT,
magma_int_t* info,
bool is_v2) {
MagmaStreamSyncGuard guard;
if (!is_v2) {
magma_zgeqrf_gpu(
m,
n,
reinterpret_cast<magmaDoubleComplex*>(dA),
ldda,
reinterpret_cast<magmaDoubleComplex*>(tau),
reinterpret_cast<magmaDoubleComplex*>(dT),
info);
} else {
magma_zgeqrf2_gpu(
m,
n,
reinterpret_cast<magmaDoubleComplex*>(dA),
ldda,
reinterpret_cast<magmaDoubleComplex*>(tau),
info);
}
AT_CUDA_CHECK(hipGetLastError());
}
template <>
void magmaGeqrf<c10::complex<float>>(
magma_int_t m,
magma_int_t n,
c10::complex<float>* dA,
magma_int_t ldda,
c10::complex<float>* tau,
c10::complex<float>* dT,
magma_int_t* info,
bool is_v2) {
MagmaStreamSyncGuard guard;
if (!is_v2) {
magma_cgeqrf_gpu(
m,
n,
reinterpret_cast<magmaFloatComplex*>(dA),
ldda,
reinterpret_cast<magmaFloatComplex*>(tau),
reinterpret_cast<magmaFloatComplex*>(dT),
info);
} else {
magma_cgeqrf2_gpu(
m,
n,
reinterpret_cast<magmaFloatComplex*>(dA),
ldda,
reinterpret_cast<magmaFloatComplex*>(tau),
info);
}
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaOrgqr<double>(
magma_int_t m, magma_int_t n, magma_int_t k, double* dA, magma_int_t ldda,
double* tau, double* dT, magma_int_t nb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dorgqr_gpu(m, n, k, dA, ldda, tau, dT, nb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaOrgqr<float>(
magma_int_t m, magma_int_t n, magma_int_t k, float* dA, magma_int_t ldda,
float* tau, float* dT, magma_int_t nb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sorgqr_gpu(m, n, k, dA, ldda, tau, dT, nb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template <>
void magmaOrgqr<c10::complex<double>>(
magma_int_t m,
magma_int_t n,
magma_int_t k,
c10::complex<double>* dA,
magma_int_t ldda,
c10::complex<double>* tau,
c10::complex<double>* dT,
magma_int_t nb,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zungqr_gpu(
m,
n,
k,
reinterpret_cast<magmaDoubleComplex*>(dA),
ldda,
reinterpret_cast<magmaDoubleComplex*>(tau),
reinterpret_cast<magmaDoubleComplex*>(dT),
nb,
info);
AT_CUDA_CHECK(hipGetLastError());
}
template <>
void magmaOrgqr<c10::complex<float>>(
magma_int_t m,
magma_int_t n,
magma_int_t k,
c10::complex<float>* dA,
magma_int_t ldda,
c10::complex<float>* tau,
c10::complex<float>* dT,
magma_int_t nb,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cungqr_gpu(
m,
n,
k,
reinterpret_cast<magmaFloatComplex*>(dA),
ldda,
reinterpret_cast<magmaFloatComplex*>(tau),
reinterpret_cast<magmaFloatComplex*>(dT),
nb,
info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSyevd<double>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, double* dA, magma_int_t ldda,
double* w, double* wA, magma_int_t ldwa, double* work, magma_int_t lwork, double* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
(void)rwork; // unused
(void)lrwork; // unused
MagmaStreamSyncGuard guard;
magma_dsyevd_gpu(jobz, uplo, n, dA, ldda, w, wA, ldwa, work, lwork, iwork, liwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSyevd<float>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, float* dA, magma_int_t ldda,
float* w, float* wA, magma_int_t ldwa, float* work, magma_int_t lwork, float* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
(void)rwork; // unused
(void)lrwork; // unused
MagmaStreamSyncGuard guard;
magma_ssyevd_gpu(jobz, uplo, n, dA, ldda, w, wA, ldwa, work, lwork, iwork, liwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSyevd<c10::complex<double>, double>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda,
double* w, c10::complex<double>* wA, magma_int_t ldwa, c10::complex<double>* work, magma_int_t lwork, double* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zheevd_gpu(
jobz, uplo, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, w, reinterpret_cast<magmaDoubleComplex*>(wA),
ldwa, reinterpret_cast<magmaDoubleComplex*>(work), lwork, rwork, lrwork, iwork, liwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSyevd<c10::complex<float>, float>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda,
float* w, c10::complex<float>* wA, magma_int_t ldwa, c10::complex<float>* work, magma_int_t lwork, float* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cheevd_gpu(
jobz, uplo, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, w, reinterpret_cast<magmaFloatComplex*>(wA),
ldwa, reinterpret_cast<magmaFloatComplex*>(work), lwork, rwork, lrwork, iwork, liwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaEig<double>(
magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n,
double *A, magma_int_t lda,
double *w,
double *VL, magma_int_t ldvl,
double *VR, magma_int_t ldvr,
double *work, magma_int_t lwork,
double *rwork,
magma_int_t *info) {
MagmaStreamSyncGuard guard;
// magma [sd]geev wants to separate output arrays: wr and wi for the real
// and imaginary parts
double *wr = w;
double *wi = w + n;
(void)rwork; // unused
magma_dgeev(jobvl, jobvr, n, A, lda, wr, wi, VL, ldvl, VR, ldvr, work, lwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaEig<float>(
magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n,
float *A, magma_int_t lda,
float *w,
float *VL, magma_int_t ldvl,
float *VR, magma_int_t ldvr,
float *work, magma_int_t lwork,
float *rwork,
magma_int_t *info) {
MagmaStreamSyncGuard guard;
float *wr = w;
float *wi = w + n;
(void)rwork; // unused
magma_sgeev(jobvl, jobvr, n, A, lda, wr, wi, VL, ldvl, VR, ldvr, work, lwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaEig<c10::complex<double>, double>(
magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n,
c10::complex<double> *A, magma_int_t lda,
c10::complex<double> *w,
c10::complex<double> *VL, magma_int_t ldvl,
c10::complex<double> *VR, magma_int_t ldvr,
c10::complex<double> *work, magma_int_t lwork,
double *rwork,
magma_int_t *info) {
MagmaStreamSyncGuard guard;
magma_zgeev(jobvl, jobvr, n,
reinterpret_cast<magmaDoubleComplex*>(A), lda,
reinterpret_cast<magmaDoubleComplex*>(w),
reinterpret_cast<magmaDoubleComplex*>(VL), ldvl,
reinterpret_cast<magmaDoubleComplex*>(VR), ldvr,
reinterpret_cast<magmaDoubleComplex*>(work), lwork,
rwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaEig<c10::complex<float>, float>(
magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n,
c10::complex<float> *A, magma_int_t lda,
c10::complex<float> *w,
c10::complex<float> *VL, magma_int_t ldvl,
c10::complex<float> *VR, magma_int_t ldvr,
c10::complex<float> *work, magma_int_t lwork,
float *rwork,
magma_int_t *info) {
MagmaStreamSyncGuard guard;
magma_cgeev(jobvl, jobvr, n,
reinterpret_cast<magmaFloatComplex*>(A), lda,
reinterpret_cast<magmaFloatComplex*>(w),
reinterpret_cast<magmaFloatComplex*>(VL), ldvl,
reinterpret_cast<magmaFloatComplex*>(VR), ldvr,
reinterpret_cast<magmaFloatComplex*>(work), lwork,
rwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSvd<double>(
magma_vec_t jobz, magma_int_t m, magma_int_t n, double* A,
magma_int_t lda, double* s, double* U, magma_int_t ldu,
double* VT, magma_int_t ldvt, double* work, magma_int_t lwork,
double *rwork, magma_int_t* iwork, magma_int_t* info) {
(void)rwork; // unused
MagmaStreamSyncGuard guard;
magma_dgesdd(jobz, m, n, A, lda, s, U, ldu, VT, ldvt, work, lwork, iwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSvd<float>(
magma_vec_t jobz, magma_int_t m, magma_int_t n, float* A,
magma_int_t lda, float* s, float* U, magma_int_t ldu,
float* VT, magma_int_t ldvt, float* work, magma_int_t lwork,
float* rwork, magma_int_t* iwork, magma_int_t* info) {
(void)rwork; // unused
MagmaStreamSyncGuard guard;
magma_sgesdd(jobz, m, n, A, lda, s, U, ldu, VT, ldvt, work, lwork, iwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSvd<c10::complex<float>, float>(
magma_vec_t jobz, magma_int_t m, magma_int_t n, c10::complex<float>* A,
magma_int_t lda, float* s, c10::complex<float>* U, magma_int_t ldu,
c10::complex<float>* VT, magma_int_t ldvt, c10::complex<float>* work, magma_int_t lwork,
float *rwork, magma_int_t* iwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgesdd(jobz, m, n, reinterpret_cast<magmaFloatComplex*>(A), lda, s,
reinterpret_cast<magmaFloatComplex*>(U), ldu,
reinterpret_cast<magmaFloatComplex*>(VT), ldvt,
reinterpret_cast<magmaFloatComplex*>(work), lwork,
rwork, iwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSvd<c10::complex<double>, double>(
magma_vec_t jobz, magma_int_t m, magma_int_t n, c10::complex<double>* A,
magma_int_t lda, double* s, c10::complex<double>* U, magma_int_t ldu,
c10::complex<double>* VT, magma_int_t ldvt, c10::complex<double>* work, magma_int_t lwork,
double *rwork, magma_int_t* iwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgesdd(jobz, m, n, reinterpret_cast<magmaDoubleComplex*>(A), lda, s,
reinterpret_cast<magmaDoubleComplex*>(U), ldu,
reinterpret_cast<magmaDoubleComplex*>(VT), ldvt,
reinterpret_cast<magmaDoubleComplex*>(work), lwork,
rwork, iwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuSolve<double>(
magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda, magma_int_t* ipiv,
double* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgetrs_gpu(MagmaNoTrans, n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuSolve<float>(
magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda, magma_int_t* ipiv,
float* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgetrs_gpu(MagmaNoTrans, n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuSolve<c10::complex<double>>(
magma_int_t n, magma_int_t nrhs, c10::complex<double>* dA, magma_int_t ldda, magma_int_t* ipiv,
c10::complex<double>* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgetrs_gpu(MagmaNoTrans, n, nrhs, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, ipiv, reinterpret_cast<magmaDoubleComplex*>(dB), lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuSolve<c10::complex<float>>(
magma_int_t n, magma_int_t nrhs, c10::complex<float>* dA, magma_int_t ldda, magma_int_t* ipiv,
c10::complex<float>* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgetrs_gpu(MagmaNoTrans, n, nrhs, reinterpret_cast<magmaFloatComplex*>(dA), ldda, ipiv, reinterpret_cast<magmaFloatComplex*>(dB), lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuSolveBatched<double>(
magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
double** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_dgetrs_batched(MagmaNoTrans, n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuSolveBatched<float>(
magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
float** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_sgetrs_batched(MagmaNoTrans, n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuSolveBatched<c10::complex<double>>(
magma_int_t n, magma_int_t nrhs, c10::complex<double>** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
c10::complex<double>** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_zgetrs_batched(MagmaNoTrans, n, nrhs, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, dipiv_array, reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuSolveBatched<c10::complex<float>>(
magma_int_t n, magma_int_t nrhs, c10::complex<float>** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
c10::complex<float>** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_cgetrs_batched(MagmaNoTrans, n, nrhs, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, dipiv_array, reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaGels<float>(
magma_trans_t trans, magma_int_t m, magma_int_t n, magma_int_t nrhs,
float* dA, magma_int_t ldda, float* dB, magma_int_t lddb,
float* hwork, magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgels_gpu(trans, m, n, nrhs,
dA, ldda, dB, lddb,
hwork, lwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaGels<double>(
magma_trans_t trans, magma_int_t m, magma_int_t n, magma_int_t nrhs,
double* dA, magma_int_t ldda, double* dB, magma_int_t lddb,
double* hwork, magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgels_gpu(trans, m, n, nrhs,
dA, ldda, dB, lddb,
hwork, lwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaGels<c10::complex<float>>(
magma_trans_t trans, magma_int_t m, magma_int_t n, magma_int_t nrhs,
c10::complex<float>* dA, magma_int_t ldda, c10::complex<float>* dB, magma_int_t lddb,
c10::complex<float>* hwork, magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgels_gpu(trans, m, n, nrhs,
reinterpret_cast<magmaFloatComplex*>(dA), ldda,
reinterpret_cast<magmaFloatComplex*>(dB), lddb,
reinterpret_cast<magmaFloatComplex*>(hwork), lwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaGels<c10::complex<double>>(
magma_trans_t trans, magma_int_t m, magma_int_t n, magma_int_t nrhs,
c10::complex<double>* dA, magma_int_t ldda, c10::complex<double>* dB, magma_int_t lddb,
c10::complex<double>* hwork, magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgels_gpu(trans, m, n, nrhs,
reinterpret_cast<magmaDoubleComplex*>(dA), ldda,
reinterpret_cast<magmaDoubleComplex*>(dB), lddb,
reinterpret_cast<magmaDoubleComplex*>(hwork), lwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
namespace {
/*
MAGMA can return errors both as a return value and in the info argument.
The return value and info should always be identical.
In general, the meaning is as given in this table.
Predefined error codes are large negative numbers. Using the symbolic
constants below is preferred, but the numeric values can be found in
include/magma_types.h.
Info | Description
----------- | -----------
info = 0 (MAGMA_SUCCESS) | Successful exit
info < 0, but small | For info = -i, the i-th argument had an illegal value
info > 0 | Function-specific error such as singular matrix
MAGMA_ERR_DEVICE_ALLOC | Could not allocate GPU device memory
MAGMA_ERR_HOST_ALLOC | Could not allocate CPU host memory
MAGMA_ERR_ILLEGAL_VALUE | An argument had an illegal value (deprecated; instead it should return -i to say the i-th argument was bad)
MAGMA_ERR_INVALID_PTR | Can't free pointer
MAGMA_ERR_NOT_IMPLEMENTED | Function or option not implemented
MAGMA_ERR_NOT_SUPPORTED | Function or option not supported on the current architecture
*/
void checkMagmaInternalError(magma_int_t info, const std::string& magma_function_name) {
// if info > 0 the error is function-specific, do nothing in this case
TORCH_CHECK(info >= 0,
"MAGMA error: ",
magma_strerror(info),
", info = ", info,
", when calling ", magma_function_name);
}
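// Note that this only throws for illegal-argument and MAGMA_ERR_* codes (info < 0);
// positive info values (function-specific failures such as a singular matrix) are
// intentionally left to the callers to interpret.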
} // anonymous namespace
#endif // USE_MAGMA
#define ALLOCATE_ARRAY(name, type, size) \
auto storage_##name = pin_memory<type>(size); \
name = static_cast<type*>(storage_##name.data());
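// ALLOCATE_ARRAY(name, type, size) pins 'size' elements of host memory via pin_memory<type>
// and keeps the backing storage alive as storage_##name for the rest of the enclosing scope,
// e.g. ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n) as used in apply_solve below.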
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_solve(Tensor& b, Tensor& A, Tensor& infos_out) {
#ifndef USE_MAGMA
AT_ERROR("solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto A_data = A.data_ptr<scalar_t>();
auto b_data = b.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
magma_int_t lda = std::max(magma_int_t{1}, n);
if (b.dim() == 2) {
auto ipiv = at::empty({n}, at::kInt);
// magmaSolve requires the infos tensor to live on CPU
Tensor infos = at::empty(infos_out.sizes(), infos_out.options().device(kCPU));
magmaSolve<scalar_t>(n, nrhs, A_data, lda, ipiv.data_ptr<magma_int_t>(),
b_data, lda, infos.data_ptr<magma_int_t>());
infos_out.copy_(infos);
} else {
auto infos_data = infos_out.data_ptr<magma_int_t>();
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
magma_int_t* ipiv_data;
magma_int_t** ipiv_array;
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n);
ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size);
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
ipiv_array[i] = &ipiv_data[i * n];
}
MAGMAQueue magma_queue(b.get_device());
constexpr int64_t batch_limit = 65535;
// Compute as many batches of 65535 as possible
// The number of "mini"-batches is floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
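// e.g. with batch_size = 70000 the loop below runs one full mini-batch of 65535 solves and
// the remainder branch handles the remaining 70000 % 65535 = 4465 solves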
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** A_array_cur = &A_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magma_int_t** ipiv_array_cur = &ipiv_array[mini_idx];
magma_int_t* info_array_cur = &infos_data[mini_idx];
magmaSolveBatched<scalar_t>(
n, nrhs, A_array_cur, lda, ipiv_array_cur, b_array_cur, lda,
info_array_cur, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaSolveBatched<scalar_t>(
n, nrhs, &A_array[mini_idx], lda, &ipiv_array[mini_idx], &b_array[mini_idx], lda,
&infos_data[mini_idx], batch_size % batch_limit, magma_queue);
}
}
#endif
}
std::tuple<Tensor, Tensor> _solve_helper_cuda(const Tensor& self, const Tensor& A) {
auto self_working_copy = cloneBatchedColumnMajor(self);
auto A_working_copy = cloneBatchedColumnMajor(A);
// infos might not get filled for empty inputs, therefore at::zeros is used instead of at::empty
auto infos = at::zeros({std::max<int64_t>(1, batchCount(self))}, self.options().dtype(kInt));
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "solve_cuda", [&]{
apply_solve<scalar_t>(self_working_copy, A_working_copy, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "solve_cuda");
} else {
singleCheckErrors(infos.item().toInt(), "solve_cuda");
}
return std::tuple<Tensor, Tensor>(self_working_copy, A_working_copy);
}
// This is a type dispatching helper function for 'apply_solve'
Tensor& _linalg_solve_out_helper_cuda(Tensor& result, Tensor& input, Tensor& infos) {
// 'result' and 'input' should be in column major order (it should be checked before calling this function)
// the content of 'result', 'input' and 'infos' is overwritten by 'apply_solve'
// 'result' should contain the data of the 'other' tensor (right-hand side of the linear system of equations)
// 'input' should contain the data of the original 'input' tensor (left-hand side of the linear system)
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "linalg_solve_out_cuda", [&]{
apply_solve<scalar_t>(result, input, infos);
});
return result;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ inverse ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/*
Computes the inverse of the n-by-n matrix 'self'; the result is saved to 'self_inv'.
'infos' is an int Tensor containing error codes for each matrix in the batched input.
'infos_lu' holds magmaLu errors, and 'infos_getri' holds magmaGetri errors.
For more information see MAGMA's documentation for the GETRI and GETRF routines.
*/
template <typename scalar_t>
static void apply_batched_inverse(Tensor& self, Tensor& self_inv, Tensor& infos_lu, Tensor& infos_getri) {
#ifndef USE_MAGMA
AT_ERROR("inverse: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data_ptr<scalar_t>();
auto self_mat_stride = matrixStride(self);
auto self_inv_data = self_inv.data_ptr<scalar_t>();
auto self_inv_mat_stride = matrixStride(self_inv);
auto infos_lu_data = infos_lu.data_ptr<magma_int_t>();
auto infos_getri_data = infos_getri.data_ptr<magma_int_t>();
magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
// MAGMA does not work with batch_size == 0, let's return early in this case
if (batch_size == 0) {
return;
}
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
magma_int_t lda = std::max<magma_int_t>(1, n);
magma_int_t* ipiv_data;
magma_int_t** ipiv_array;
scalar_t** self_array;
scalar_t** self_inv_array;
ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * lda);
ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size);
ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(self_inv_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
self_array[i] = &self_data[i * self_mat_stride];
self_inv_array[i] = &self_inv_data[i * self_inv_mat_stride];
ipiv_array[i] = &ipiv_data[i * n];
}
// magmaLuBatched leaves ipiv_data values unwritten for singular matrices.
// Initialize to avoid memory access violations inside magma kernels (gh-51930).
std::fill_n(ipiv_data, batch_size * n, 1);
MAGMAQueue magma_queue(self.get_device());
magmaLuBatched<scalar_t>(
n, n, self_array, lda, ipiv_array, infos_lu_data,
batch_size, magma_queue);
constexpr int64_t batch_limit = 65535;
// Compute as many batches of 65535 as possible
// The number of "mini"-batches is floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** self_array_cur = &self_array[mini_idx];
scalar_t** self_inv_array_cur = &self_inv_array[mini_idx];
magma_int_t** ipiv_array_cur = &ipiv_array[mini_idx];
magma_int_t* info_array_cur_getri = &infos_getri_data[mini_idx];
magmaGetriBatched<scalar_t>(
n, self_array_cur, lda, ipiv_array_cur, self_inv_array_cur,
lda, info_array_cur_getri, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaGetriBatched<scalar_t>(
n, &self_array[mini_idx], lda, &ipiv_array[mini_idx], &self_inv_array[mini_idx],
lda, &infos_getri_data[mini_idx], batch_size % batch_limit, magma_queue);
}
#endif
}
template <typename scalar_t>
static void apply_single_inverse(Tensor& self, Tensor& infos_lu, Tensor& infos_getri) {
#ifndef USE_MAGMA
AT_ERROR("inverse: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
magma_int_t lda = std::max<magma_int_t>(1, n);
magma_int_t lwork = n * magmaGetriOptimalBlocksize<scalar_t>(n);
// magmaLu and magmaGetri require the infos tensor to live on CPU
infos_lu = infos_lu.to(at::kCPU);
infos_getri = infos_getri.to(at::kCPU);
Tensor ipiv = at::empty({lda}, at::kInt);
Tensor dwork = at::empty({lwork}, self.options());
magmaLu<scalar_t>(n, n, self_data, lda, ipiv.data_ptr<magma_int_t>(), infos_lu.data_ptr<magma_int_t>());
magmaGetri<scalar_t>(
n, self_data, lda, ipiv.data_ptr<magma_int_t>(), dwork.data_ptr<scalar_t>(), lwork, infos_getri.data_ptr<magma_int_t>());
#endif
}
Tensor _inverse_helper_cuda_legacy(const Tensor& self) {
auto self_inv_working_copy = cloneBatchedColumnMajor(self);
if (self.dim() > 2) {
auto infos_lu = at::zeros({std::max<int64_t>(1, batchCount(self))}, self.options().dtype(kInt));
auto infos_getri = at::zeros({std::max<int64_t>(1, batchCount(self))}, self.options().dtype(kInt));
auto self_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "inverse_cuda", [&]{
apply_batched_inverse<scalar_t>(
self_working_copy, self_inv_working_copy, infos_lu, infos_getri);
});
batchCheckErrors(infos_lu, "inverse_cuda");
batchCheckErrors(infos_getri, "inverse_cuda");
} else {
// magmaLu and magmaGetri require the infos tensor to live on CPU
auto infos_lu = at::zeros({1}, self.options().dtype(kInt).device(kCPU));
auto infos_getri = at::zeros({1}, self.options().dtype(kInt).device(kCPU));
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "inverse_cuda", [&]{
apply_single_inverse<scalar_t>(self_inv_working_copy, infos_lu, infos_getri);
});
singleCheckErrors(infos_lu.item().toInt(), "inverse_cuda");
singleCheckErrors(infos_getri.item().toInt(), "inverse_cuda");
}
return self_inv_working_copy;
}
Tensor _inverse_helper_cuda(const Tensor& self) {
#ifdef USE_CUSOLVER
if ((self.dim() == 2) || (/* self.dim() > 2 && */ batchCount(self) <= 2) || !use_magma_) {
return _inverse_helper_cuda_lib(self); // cusolver or cublas
} else {
return _inverse_helper_cuda_legacy(self); // magma-cuda
}
#else
return _inverse_helper_cuda_legacy(self); // magma-cuda
#endif
}
// This is a type dispatching helper function for 'apply_batched_inverse' and 'singleCheckErrors'
Tensor& _linalg_inv_out_helper_cuda_legacy(Tensor& result, Tensor& infos_lu, Tensor& infos_getri) {
// assuming result is in column major order and contains the matrices to invert
if (result.dim() > 2) {
auto input_working_copy = cloneBatchedColumnMajor(result);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "linalg_inv_out_cuda", [&]{
apply_batched_inverse<scalar_t>(
input_working_copy, result, infos_lu, infos_getri);
});
} else {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "linalg_inv_out_cuda", [&]{
apply_single_inverse<scalar_t>(result, infos_lu, infos_getri);
});
}
return result;
}
// This is a MAGMA/cuSOLVER dispatching helper function
Tensor& _linalg_inv_out_helper_cuda(Tensor &result, Tensor& infos_lu, Tensor& infos_getri) {
// This function calculates the inverse matrix in-place
// result should be in column major order and contain matrices to invert
#ifdef USE_CUSOLVER
if ((result.dim() == 2) || (/* result.dim() > 2 && */ batchCount(result) <= 2) || !use_magma_) {
return _linalg_inv_out_helper_cuda_lib(result, infos_lu, infos_getri); // cusolver or cublas
} else {
return _linalg_inv_out_helper_cuda_legacy(result, infos_lu, infos_getri); // magma-cuda
}
#else
return _linalg_inv_out_helper_cuda_legacy(result, infos_lu, infos_getri); // magma-cuda
#endif
return result;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_cholesky_solve(Tensor& b, Tensor& A, bool upper, int64_t& info) {
#ifndef USE_MAGMA
AT_ERROR("cholesky_solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
auto A_data = A.data_ptr<scalar_t>();
auto b_data = b.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t lda = std::max<magma_int_t>(1, n);
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
int info_tmp = 0;
if (b.dim() == 2) {
magmaCholeskySolve<scalar_t>(uplo, n, nrhs, A_data, lda,
b_data, lda, &info_tmp);
info = info_tmp;
} else {
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
}
MAGMAQueue magma_queue(b.get_device());
constexpr int64_t batch_limit = 65535;
// Compute as many batches of 65535 as possible
// The number of "mini"-batches is floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** A_array_cur = &A_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magmaCholeskySolveBatched<scalar_t>(
uplo, n, nrhs, A_array_cur, lda, b_array_cur, lda,
info_tmp, batch_limit, magma_queue);
if (info_tmp != 0) {
break;
}
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0 && info_tmp == 0) {
magmaCholeskySolveBatched<scalar_t>(
uplo, n, nrhs, &A_array[mini_idx], lda, &b_array[mini_idx], lda,
info_tmp, batch_size % batch_limit, magma_queue);
}
info = info_tmp;
}
#endif
}
Tensor _cholesky_solve_helper_cuda_magma(const Tensor& self, const Tensor& A, bool upper) {
int64_t info = 0;
auto self_working_copy = cloneBatchedColumnMajor(self);
auto A_working_copy = cloneBatchedColumnMajor(A);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "cholesky_solve_cuda", [&]{
apply_cholesky_solve<scalar_t>(self_working_copy, A_working_copy, upper, info);
});
TORCH_CHECK(info == 0, "MAGMA cholesky_solve : invalid argument: ", -info);
return self_working_copy;
}
// Todo: cusolverDn<T>potrsBatched only supports nrhs == 1 and does not have good performance.
// Batched cholesky_solve is dispatched to magma.
Tensor _cholesky_solve_helper_cuda(const Tensor& self, const Tensor& A, bool upper) {
#ifdef USE_CUSOLVER
if (batchCount(self) == 1 || !use_magma_) {
return _cholesky_solve_helper_cuda_cusolver(self, A, upper);
} else {
return _cholesky_solve_helper_cuda_magma(self, A, upper);
}
#else
return _cholesky_solve_helper_cuda_magma(self, A, upper);
#endif
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_cholesky(Tensor& self, bool upper, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("cholesky: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
auto self_data = self.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
auto lda = std::max<magma_int_t>(1, n);
if (self.dim() == 2) {
magma_int_t info = 0;
magmaCholesky<scalar_t>(uplo, n, self_data, lda, &info);
infos[0] = info;
} else {
auto self_mat_stride = matrixStride(self);
magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
magma_int_t* info_array;
scalar_t** self_array;
ALLOCATE_ARRAY(info_array, magma_int_t, batch_size);
ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
self_array[i] = &self_data[i * self_mat_stride];
}
MAGMAQueue magma_queue(self.get_device());
int64_t batch_limit = self.is_complex() ? 65535 : 262140;
// Compute as many batches of 262140 as possible
// 262140 is the size of the largest batch of matrices that can be run without
// violating the maximum kernel configuration
// For complex input the batch limit is 65535 (determined experimentally, see https://github.com/pytorch/pytorch/pull/47047#discussion_r516086923 for more information)
// The number of "mini"-batches is floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit cholesky calls
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** self_array_cur = &self_array[mini_idx];
magma_int_t* info_array_cur = &info_array[mini_idx];
magmaCholeskyBatched<scalar_t>(
uplo, n, self_array_cur, lda, info_array_cur, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaCholeskyBatched<scalar_t>(
uplo, n, &self_array[mini_idx], lda, &info_array[mini_idx], batch_size % batch_limit, magma_queue);
}
for (int64_t i = 0; i < batch_size; i++) {
infos[i] = info_array[i];
}
}
#endif
}
Tensor _cholesky_helper_cuda_magma(const Tensor& self, bool upper) {
std::vector<int64_t> infos(batchCount(self), 0);
Tensor result;
if (self.dim() > 2) {
// MAGMA's batched cholesky operator has an off-by-one error causing an illegal memory access (IMA)
// (see https://github.com/pytorch/pytorch/issues/42666). This code is based
// on the cloneBatchedColumnMajor function; however, it pads the input with
// one extra element, utilizing the fact that the resize_as_ method preserves
// the storage even if it's larger than the new sizes. This way, if MAGMA
// reads out of bounds, it will still be reading valid user memory.
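// For example, for a single 3x3 input 'result' owns storage for 3*3 + 1 = 10 elements but is
// viewed as 3x3 after resize_as_ below, so a one-element overread still lands inside our allocation.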
const Tensor input = upper ? self : self.transpose(-1, -2);
result = at::empty(input.numel() + 1, input.options());
result.resize_as_(input).copy_(input).transpose_(-1, -2);
} else {
result = cloneBatchedColumnMajor(upper ? self.transpose(-1, -2) : self);
}
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(
self.scalar_type(), "cholesky_cuda", [&] {
apply_cholesky<scalar_t>(result, false, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "cholesky_cuda");
} else {
singleCheckErrors(infos[0], "cholesky_cuda");
}
return upper ? result.transpose_(-1, -2) : result;
}
// Todo: cusolverDnXpotrfBatched has some numerical issue and is not used
// here. Batched cholesky is dispatched to magma.
// We will switch to cusolverDnXpotrfBatched after the issue is fixed.
// See https://github.com/pytorch/pytorch/issues/53879.
Tensor _cholesky_helper_cuda(const Tensor& self, bool upper) {
#ifdef USE_CUSOLVER
if (batchCount(self) == 1 || !use_magma_) {
return _cholesky_helper_cuda_cusolver(self, upper);
}
else {
return _cholesky_helper_cuda_magma(self, upper);
}
#else
return _cholesky_helper_cuda_magma(self, upper);
#endif
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky_inverse ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/*
Computes the inverse of an n-by-n symmetric (Hermitian) positive-definite matrix 'input' using the Cholesky solver.
This is an in-place routine; the content of 'input' is overwritten.
'infos' is an int Tensor containing error codes for each matrix in the batched input.
MAGMA requires 'infos' to reside in CPU memory.
For more information see MAGMA's documentation for POTRS routine.
*/
template <typename scalar_t>
static void apply_cholesky_inverse(Tensor& input, Tensor& infos, bool upper) {
#ifndef USE_MAGMA
TORCH_CHECK(false, "cholesky_inverse: MAGMA library not found in compilation. Please rebuild with MAGMA.");
#else
// magmaCholeskyInverse (magma_dpotri_gpu) is slow because internally
// it transfers data several times between GPU and CPU and calls a LAPACK routine on the CPU;
// using magmaCholeskySolveBatched is a lot faster.
// Note that magmaCholeskySolve is also slow.
// 'input' is modified in-place, so we need to clone it and replace it with a diagonal matrix
// for apply_cholesky_solve
auto input_working_copy = cloneBatchedColumnMajor(input);
// 'input' tensor has to be a batch of identity matrices
input.fill_(0);
input.diagonal(/*offset=*/0, /*dim1=*/-2, /*dim2=*/-1).fill_(1);
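// At this point 'input' holds (batches of) identity matrices; apply_cholesky_solve below solves
// A * X = I using the Cholesky factor saved in 'input_working_copy', overwriting 'input' with A^-1.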
Tensor result_u, input_u;
if (input.dim() == 2) {
// unsqueezing here so that the batched version is used
result_u = input.unsqueeze(0);
input_u = input_working_copy.unsqueeze(0);
} else {
result_u = input;
input_u = input_working_copy;
}
// magma's potrs_batched doesn't take a per-matrix array of ints as an 'info' argument;
// it returns a single 'magma_int_t'.
// If info = 0 the operation is successful; if info = -i, the i-th parameter had an illegal value.
int64_t info_tmp = 0;
apply_cholesky_solve<scalar_t>(result_u, input_u, upper, info_tmp);
infos.fill_(info_tmp);
#endif
}
// This is a type dispatching helper function for 'apply_cholesky_inverse'
Tensor& cholesky_inverse_kernel_impl_magma(Tensor &result, Tensor& infos, bool upper) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "cholesky_inverse_out_cuda", [&]{
apply_cholesky_inverse<scalar_t>(result, infos, upper);
});
return result;
}
Tensor& cholesky_inverse_kernel_impl(Tensor &result, Tensor& infos, bool upper) {
// This function calculates the inverse matrix in-place
// result should be in column major order and contain matrices to invert
// the content of result is overwritten by 'apply_cholesky_inverse'
#ifdef USE_CUSOLVER
if (batchCount(result) == 1 || !use_magma_) {
return cholesky_inverse_kernel_impl_cusolver(result, infos, upper);
} else {
return cholesky_inverse_kernel_impl_magma(result, infos, upper);
}
#else
return cholesky_inverse_kernel_impl_magma(result, infos, upper);
#endif
}
REGISTER_DISPATCH(cholesky_inverse_stub, &cholesky_inverse_kernel_impl);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lu ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_lu(Tensor& self, Tensor& pivots, Tensor& infos, bool get_pivots) {
#ifndef USE_MAGMA
AT_ERROR("lu: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data_ptr<scalar_t>();
magma_int_t m = magma_int_cast(self.size(-2), "m");
magma_int_t n = magma_int_cast(self.size(-1), "n");
magma_int_t k = std::min(m, n);
if (self.dim() == 2) {
// If pivots are requested (get_pivots == true), then we have to compute them.
// magmaLu and magmaLuNoPiv use a hybrid CPU-GPU algorithm to compute
// the partially-pivoted LU decomposition with / without pivots.
// The driver routines magma_(d/s)getrf_(nopiv_)gpu accept a tensor on the CPU for pivots.
// The data is later copied back to the appropriate output tensor.
Tensor info_tmp = at::zeros({}, at::kInt);
if (get_pivots) {
Tensor piv_tmp = at::empty({k}, at::kInt);
magmaLu<scalar_t>(
m, n, self_data, m, piv_tmp.data_ptr<magma_int_t>(), info_tmp.data_ptr<magma_int_t>());
pivots.copy_(piv_tmp);
} else {
magmaLuNoPiv<scalar_t>(m, n, self_data, m, info_tmp.data_ptr<magma_int_t>());
}
infos.copy_(info_tmp);
} else {
auto self_matrix_stride = matrixStride(self);
magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
scalar_t** self_array;
ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
self_array[i] = &self_data[i * self_matrix_stride];
}
MAGMAQueue magma_queue(self.get_device());
// Same comment as in the case of single matrix above.
if (get_pivots) {
auto pivots_data = pivots.data_ptr<magma_int_t>();
auto pivots_matrix_stride = pivots.size(-1);
magma_int_t** pivots_array;
ALLOCATE_ARRAY(pivots_array, magma_int_t*, batch_size);
for (int64_t i = 0; i < batch_size; i++) {
pivots_array[i] = &pivots_data[i * pivots_matrix_stride];
}
magmaLuBatched<scalar_t>(
m, n, self_array, m, pivots_array,
infos.data_ptr<magma_int_t>(), batch_size, magma_queue);
} else {
magmaLuNoPivBatched<scalar_t>(
m, n, self_array, m, infos.data_ptr<magma_int_t>(),
batch_size, magma_queue);
}
}
#endif
}
std::tuple<Tensor, Tensor, Tensor> _lu_with_info_cuda(const Tensor& self, bool pivot, bool check_errors) {
TORCH_CHECK(self.dim() >= 2,
"expected tensor with 2 or more dimensions, got size: ", self.sizes(),
" instead");
auto m = self.size(-2);
auto n = self.size(-1);
auto k = std::min(m, n);
auto req_size = self.sizes().vec();
req_size.pop_back();
req_size.back() = k;
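// LAPACK/MAGMA pivots are 1-based; pivots_tensor is pre-filled with the identity permutation
// [1, ..., k], presumably so the returned pivots stay valid when pivoting is skipped
// (pivot == false) or the input is empty.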
Tensor pivots_tensor = at::arange(1, k + 1, self.options().dtype(at::kInt)).expand(req_size).contiguous();
req_size.pop_back();
auto infos_tensor = at::zeros(req_size, self.options().dtype(at::kInt));
Tensor self_working_copy;
if (self.numel() == 0) {
self_working_copy = at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
} else {
self_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "lu_cuda", [&]{
apply_lu<scalar_t>(self_working_copy, pivots_tensor, infos_tensor, pivot);
});
}
if (check_errors) {
if (self.dim() == 2) {
singleCheckErrors(infos_tensor.item<int64_t>(), "lu", /*allow_singular=*/true);
} else {
batchCheckErrors(infos_tensor, "lu", /*allow_singular=*/true);
}
}
return std::make_tuple(self_working_copy, pivots_tensor, infos_tensor);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ triangular_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_triangular_solve_batched(Tensor& A, Tensor& b, bool upper, bool transpose, bool conjugate_transpose, bool unitriangular) {
#ifndef USE_MAGMA
AT_ERROR("triangular_solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
magma_trans_t trans = transpose ? MagmaTrans : MagmaNoTrans;
trans = conjugate_transpose ? MagmaConjTrans : trans;
magma_diag_t diag = unitriangular ? MagmaUnit : MagmaNonUnit;
auto A_data = A.data_ptr<scalar_t>();
auto b_data = b.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
// magma returns early if m <= 0 || n <= 0 for magmaTriangularSolveBatched;
// magmaTriangularSolve calls cuBLAS, which prints
// ** On entry to DTRSM parameter number 9 had an illegal value
// so let's use a proper lda parameter here
magma_int_t lda = std::max<magma_int_t>(1, n);
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
}
MAGMAQueue magma_queue(b.get_device());
constexpr int64_t batch_limit = 65535;
// Compute as many batches of 65535 as possible
// The number of "mini"-batches is floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
int64_t mini_batches = batch_size / batch_limit;
int64_t mini_idx; // this is outside the loop because it is used for the case batch_size % batch_limit != 0
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** A_array_cur = &A_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magmaTriangularSolveBatched<scalar_t>(
uplo, trans, diag, n, nrhs, A_array_cur,
lda, b_array_cur, lda, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaTriangularSolveBatched<scalar_t>(
uplo, trans, diag, n, nrhs, &A_array[mini_idx],
lda, &b_array[mini_idx], lda, batch_size % batch_limit, magma_queue);
}
#endif
}
void triangular_solve_batched_magma(Tensor& A, Tensor& B, Tensor& infos, bool upper, bool transpose, bool conjugate_transpose, bool unitriangular) {
(void)infos; // unused
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(A.scalar_type(), "triangular_solve_cuda", [&]{
apply_triangular_solve_batched<scalar_t>(A, B, upper, transpose, conjugate_transpose, unitriangular);
});
}
void triangular_solve_kernel(Tensor& A, Tensor& B, Tensor& infos, bool upper, bool transpose, bool conjugate_transpose, bool unitriangular) {
// For batches smaller than 8 and matrix sizes larger than 64x64 the cuBLAS for-loop is faster than the batched version
if (batchCount(A) <= 8 && A.size(-1) >= 64) {
triangular_solve_cublas(A, B, infos, upper, transpose, conjugate_transpose, unitriangular);
} else {
#ifndef USE_MAGMA
triangular_solve_batched_cublas(A, B, infos, upper, transpose, conjugate_transpose, unitriangular);
#else
// cuBLAS batched is faster than MAGMA batched up until 512x512, after that MAGMA is faster
if (A.size(-1) <= 512) {
triangular_solve_batched_cublas(A, B, infos, upper, transpose, conjugate_transpose, unitriangular);
} else {
triangular_solve_batched_magma(A, B, infos, upper, transpose, conjugate_transpose, unitriangular);
}
#endif // USE_MAGMA
}
}
REGISTER_DISPATCH(triangular_solve_stub, &triangular_solve_kernel);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ orgqr ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tensor& orgqr_kernel_impl(Tensor& result, const Tensor& tau) {
// TODO: It is possible to implement efficient batched orgqr for small tau (tau.size(-1) <= 32)
// using MAGMA, however it fails on Windows because of some illegal memory reads inside MAGMA.
// See discussions in https://github.com/pytorch/pytorch/pull/51348 for comparison of cuSOLVER-MAGMA
// and Windows failure.
// For reference here is the MAGMA-based implementation: https://gist.github.com/IvanYashchuk/2db50002c9d3c1462ff769e6410ad983
#if defined(USE_CUSOLVER)
return orgqr_helper_cusolver(result, tau); // cusolver
#else
TORCH_CHECK(false, "Calling torch.orgqr on a CUDA tensor requires compiling ",
"PyTorch with cuSOLVER. Please use PyTorch built with cuSOLVER support.");
#endif
}
REGISTER_DISPATCH(orgqr_stub, &orgqr_kernel_impl);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ qr ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_geqrf(const Tensor& input, const Tensor& tau) {
#ifndef USE_MAGMA
TORCH_CHECK(
false,
"Calling torch.geqrf on a CUDA tensor requires compiling ",
"PyTorch with MAGMA. Please use PyTorch built with MAGMA support.");
#else
magma_int_t m = magma_int_cast(input.size(-2), "m");
magma_int_t n = magma_int_cast(input.size(-1), "n");
auto input_data = input.data_ptr<scalar_t>();
auto input_matrix_stride = matrixStride(input);
auto tau_stride = tau.size(-1);
auto batch_size = batchCount(input);
auto lda = std::max<int>(1, m);
// magmaGeqrf uses a hybrid CPU-GPU algorithm to compute the elementary reflectors.
// The driver routine geqrf2_gpu accepts a tensor on the CPU for elementary reflectors.
Tensor tau_cpu = at::empty(tau.sizes(), tau.options().device(at::kCPU).pinned_memory(true));
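// tau_cpu lives in pinned host memory so that the final tau.copy_(tau_cpu, /*non_blocking=*/true)
// at the end of this function can run asynchronously.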
scalar_t* tau_data = tau_cpu.data_ptr<scalar_t>();
scalar_t* work_data = nullptr; // workspace is not needed for geqrf2_gpu
magma_int_t info = 0;
for (int64_t i = 0; i < batch_size; i++) {
scalar_t* input_working_ptr = &input_data[i * input_matrix_stride];
scalar_t* tau_working_ptr = &tau_data[i * tau_stride];
// now compute the actual QR and tau
// MAGMA's geqrf2_gpu function is used; this version has LAPACK-compliant arguments.
magmaGeqrf<scalar_t>(m, n, input_working_ptr, lda, tau_working_ptr, work_data, &info, /*is_v2=*/true);
checkMagmaInternalError(info, "geqrf");
}
tau.copy_(tau_cpu, /*non_blocking=*/true);
#endif
}
// This is a type dispatching helper function for 'apply_geqrf'
void geqrf_magma(const Tensor& input, const Tensor& tau) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(input.scalar_type(), "geqrf_magma", [&]{
apply_geqrf<scalar_t>(input, tau);
});
}
// This is a backend library dispatching helper function for calling looped batch implementation
void geqrf_looped(const Tensor& input, const Tensor& tau) {
#if defined(USE_CUSOLVER)
return geqrf_cusolver(input, tau);
#else
return geqrf_magma(input, tau);
#endif
}
// This is a backend library dispatching helper function for calling specialized batched implementation
void geqrf_batched(const Tensor& input, const Tensor& tau) {
#ifdef CUDART_VERSION
// if cuBLAS is available
return geqrf_batched_cublas(input, tau);
#else
// TODO: implement MAGMA-based path using magma_zgeqrf_expert_batched
return geqrf_looped(input, tau);
#endif
}
void geqrf_kernel(const Tensor& input, const Tensor& tau) {
// If the number of rows is smaller than 32, batched is always faster for batch size > 1.
// For a larger number of rows, the batch size has to grow with the number of rows (see the condition below).
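// e.g. for an input with 128 rows the batched path below is taken only when
// batchCount(input) >= std::max<int64_t>(2, 128 / 16) = 8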
if (input.size(-2) <= 256 && batchCount(input) >= std::max<int64_t>(2, input.size(-2) / 16)) {
return geqrf_batched(input, tau);
} else {
return geqrf_looped(input, tau);
}
}
REGISTER_DISPATCH(geqrf_stub, &geqrf_kernel);
template <typename scalar_t>
static void apply_qr(Tensor& Q, Tensor& R, int64_t q_size_minus_2, int64_t r_size_minus_1, int64_t n_columns,
bool compute_q) {
#ifndef USE_MAGMA
AT_ERROR("qr: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_int_t m = magma_int_cast(q_size_minus_2, "Q.size(-2)");
magma_int_t n = magma_int_cast(r_size_minus_1, "R.size(-1)");
auto r_data = R.data_ptr<scalar_t>();
auto r_matrix_stride = matrixStride(R);
magma_int_t k = m < n ? m : n;
magma_int_t nb = magmaGeqrfOptimalBlocksize<scalar_t>(m, n);
int64_t batch_size = batchCount(R);
// magmaGeqrf uses a hybrid CPU-GPU algorithm to compute the elementary reflectors.
// The driver routine magma_(d/s)geqrf2_gpu accepts a tensor on the CPU for elementary reflectors.
Tensor tau = at::empty({k}, Q.options().device(at::kCPU));
Tensor work = at::empty({(2 * k + magma_roundup(n, 32)) * nb}, R.options());
scalar_t* tau_data = tau.data_ptr<scalar_t>();
scalar_t* work_data = work.data_ptr<scalar_t>();
// This phase computes R (the raw version)
// This uses MAGMA's ?geqrf2_gpu function
magma_int_t info = 0;
for (int64_t i = 0; i < batch_size; i++) {
scalar_t* r_working_ptr = &r_data[i * r_matrix_stride];
magmaGeqrf<scalar_t>(m, n, r_working_ptr, m, tau_data, work_data, &info, /*is_v2=*/true);
checkMagmaInternalError(info, "geqrf");
}
if (!compute_q) {
// this is for mode='r'
return;
}
// This phase computes Q (the raw version)
// We need to run ?geqrf_gpu again due to this bug in MAGMA:
// - ?geqrf_gpu allows fast computation of Q via ?orgqr_gpu, but doesn't give R properly.
// - ?geqrf2_gpu gives correct R, but doesn't allow computation of Q via ?orgqr_gpu
// Refer to the below link for more details:
// http://icl.cs.utk.edu/magma/forum/viewtopic.php?f=2&t=1015&p=2800&hilit=geqrf_gpu#p2800
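// In short: the first loop (via ?geqrf2_gpu) already produced a correct R; below, ?geqrf_gpu
// recomputes the factorization in the form that ?orgqr_gpu needs to materialize Q explicitly.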
auto q_data = Q.data_ptr<scalar_t>();
auto q_matrix_stride = matrixStride(Q);
for (int64_t i = 0; i < batch_size; i++) {
scalar_t* q_working_ptr = &q_data[i * q_matrix_stride];
magmaGeqrf<scalar_t>(m, n, q_working_ptr, m, tau_data, work_data, &info, /*is_v2=*/false);
checkMagmaInternalError(info, "geqrf");
magmaOrgqr<scalar_t>(m, n_columns, k, q_working_ptr, m, tau_data, work_data, nb, &info);
checkMagmaInternalError(info, "orgqr");
}
#endif
}
std::tuple<Tensor, Tensor> linalg_qr_helper_magma(const Tensor& self, std::string mode) {
bool compute_q, reduced;
std::tie(compute_q, reduced) = _parse_qr_mode(mode);
// Setup input geometry and inputs for apply_qr
std::vector<int64_t> q_sizes, q_strides;
int64_t n_columns_q;
std::tie(q_sizes, q_strides, n_columns_q) = _compute_geometry_for_Q(self, reduced);
Tensor q_working_copy, r_working_copy;
// If there are no elements, then we simply return a pair of tensors of required dimensions
if (self.numel() == 0) {
int64_t n = self.size(-1);
auto r_shape = self.sizes().vec();
r_shape.end()[-2] = n_columns_q;
r_shape.end()[-1] = n;
r_working_copy = at::empty(r_shape, self.options());
if (compute_q) {
auto q_shape = q_sizes;
q_shape.end()[-1] = n_columns_q;
q_working_copy = at::zeros(q_shape, self.options());
q_working_copy.diagonal(/*offset=*/0, /*dim1=*/-2, /*dim2=*/-1).fill_(1);
} else {
q_working_copy = at::empty({0}, self.options());
}
return std::make_tuple(q_working_copy, r_working_copy);
}
if (compute_q) {
q_working_copy = at::empty_strided(q_sizes, q_strides, self.options());
q_working_copy.narrow(-1, 0, self.size(-1)).copy_(self);
} else {
q_working_copy = at::empty({0}, self.options());
}
r_working_copy = cloneBatchedColumnMajor(self);
int64_t m = q_sizes[self.dim() - 2];
int64_t n = r_working_copy.size(-1);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "qr_cuda", [&]{
apply_qr<scalar_t>(q_working_copy, r_working_copy, m, n, n_columns_q, compute_q);
});
if (compute_q) {
q_working_copy = q_working_copy.narrow(-1, 0, n_columns_q);
}
r_working_copy = r_working_copy.narrow(-2, 0, n_columns_q).triu();
return std::make_tuple(q_working_copy, r_working_copy);
}
std::tuple<Tensor, Tensor> _linalg_qr_helper_cuda(const Tensor& input, std::string mode) {
#if defined(USE_CUSOLVER)
// _linalg_qr_helper_default is a generic function that is implemented using
// geqrf_stub and orgqr_stub. It dispatches to cuSOLVER for CUDA inputs if USE_CUSOLVER is defined
return _linalg_qr_helper_default(input, mode);
#else
return linalg_qr_helper_magma(input, mode);
#endif
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ symeig ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_magma_eigh(Tensor& values, Tensor& vectors, Tensor& infos, bool upper, bool compute_eigenvectors) {
#ifndef USE_MAGMA
TORCH_CHECK(
false,
"Calling torch.linalg.eigh/eigvalsh on a CUDA tensor requires compiling ",
"PyTorch with MAGMA. Please use PyTorch built with MAGMA support.");
#else
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(values.device() == kCPU);
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(infos.device() == kCPU);
using value_t = typename c10::scalar_value_type<scalar_t>::type;
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
magma_vec_t jobz = compute_eigenvectors ? MagmaVec : MagmaNoVec;
magma_int_t n = magma_int_cast(vectors.size(-1), "n");
auto lda = std::max<magma_int_t>(1, n);
auto batch_size = batchCount(vectors);
auto vectors_stride = matrixStride(vectors);
auto values_stride = values.size(-1);
auto vectors_data = vectors.data_ptr<scalar_t>();
auto values_data = values.data_ptr<value_t>();
auto infos_data = infos.data_ptr<magma_int_t>();
scalar_t* wA;
ALLOCATE_ARRAY(wA, scalar_t, lda * lda);
// Run once, first to get the optimum work sizes.
// Since we deal with batches of matrices with the same dimensions, doing this outside
// the loop saves (batch_size - 1) workspace queries which would provide the same result
// and (batch_size - 1) calls to allocate and deallocate workspace using at::empty()
magma_int_t lwork = -1;
scalar_t wkopt;
magma_int_t liwork = -1;
magma_int_t iwkopt;
magma_int_t lrwork = -1;
value_t rwkopt;
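// Passing lwork/lrwork/liwork == -1 requests a workspace-size query: MAGMA writes the optimal
// sizes into wkopt/rwkopt/iwkopt without performing the actual decomposition.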
magmaSyevd<scalar_t, value_t>(jobz, uplo, n, vectors_data, lda, values_data,
wA, lda, &wkopt, lwork, &rwkopt, lrwork, &iwkopt, liwork, infos_data);
scalar_t* work;
magma_int_t* iwork;
lwork = magma_int_cast(std::max<int64_t>(1, real_impl<scalar_t, value_t>(wkopt)), "work_size");
liwork = magma_int_cast(std::max<int64_t>(1, iwkopt), "iwork_size");
ALLOCATE_ARRAY(work, scalar_t, lwork);
ALLOCATE_ARRAY(iwork, magma_int_t, liwork);
value_t* rwork = nullptr;
c10::Storage storage_rwork;
if (vectors.is_complex()) {
lrwork = magma_int_cast(std::max<int64_t>(1, rwkopt), "rwork_size");
storage_rwork = pin_memory<value_t>(lrwork);
rwork = static_cast<value_t*>(storage_rwork.data());
}
for (decltype(batch_size) i = 0; i < batch_size; i++) {
scalar_t* vectors_working_ptr = &vectors_data[i * vectors_stride];
value_t* values_working_ptr = &values_data[i * values_stride];
magma_int_t* info_working_ptr = &infos_data[i];
magmaSyevd<scalar_t, value_t>(jobz, uplo, n, vectors_working_ptr, lda, values_working_ptr,
wA, lda, work, lwork, rwork, lrwork, iwork, liwork, info_working_ptr);
// The current behaviour for linear algebra functions is to raise an error if something goes wrong
// or the input doesn't satisfy some requirement,
// therefore we return early since further computations will be wasted anyway
if (*info_working_ptr != 0) {
return;
}
}
#endif
}
std::tuple<Tensor, Tensor> _symeig_helper_cuda(const Tensor& self, bool eigenvectors, bool upper) {
Tensor infos = at::zeros({std::max<int64_t>(1, batchCount(self))}, self.options().dtype(kInt).device(at::kCPU));
auto eigvals_shape = IntArrayRef(self.sizes().data(), self.dim()-1); // self.shape[:-1]
ScalarType real_dtype = toValueType(self.scalar_type());
// magmaSyevd uses a hybrid CPU-GPU algorithm to compute the eigenvalues and eigenvectors.
// The driver routine magma_(d/s)syevd_gpu accepts a tensor on the CPU for the eigenvalues.
// The data is later moved to the appropriate device.
// In the case where self.numel() == 0, we just return an empty tensor of the
// appropriate dimensions on the CUDA device (to avoid the unnecessary "to(at::kCUDA)")
auto eigvals_working_copy = self.numel() == 0
? at::empty(eigvals_shape, self.options().dtype(real_dtype))
: at::empty(eigvals_shape, self.options().dtype(real_dtype).device(at::kCPU));
if (self.numel() == 0) {
return std::tuple<Tensor, Tensor>(eigvals_working_copy, at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT));
}
auto self_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "symeig_cuda", [&]{
apply_magma_eigh<scalar_t>(eigvals_working_copy, self_working_copy, infos, upper, eigenvectors);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "symeig_cuda");
} else {
singleCheckErrors(infos.item().toInt(), "symeig_cuda");
}
if (eigenvectors) {
return std::tuple<Tensor, Tensor>(eigvals_working_copy.to(self.device()), self_working_copy);
} else {
return std::tuple<Tensor, Tensor>(eigvals_working_copy.to(self.device()), at::empty({0}, self.options()));
}
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ linalg_eigh ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// This is a type dispatch function for 'apply_magma_eigh'
// For small inputs the result is computed on the CPU
void linalg_eigh_magma(Tensor& eigenvalues, Tensor& eigenvectors, Tensor& infos, bool upper, bool compute_eigenvectors) {
// MAGMA just calls LAPACK for eigenvectors.size(-1) <= 128
// See https://bitbucket.org/icl/magma/src/e6fdca447bd402693e8b0b950a898b6879bbcc41/src/zheevd_gpu.cpp?at=master#lines-258
// in addition, lda is ignored, breaking 0x0 inputs
if (eigenvectors.size(-1) > 128) {
// MAGMA requires eigenvalues and infos tensors to reside on CPU
Tensor eigenvalues_cpu = eigenvalues.to(kCPU);
infos = infos.to(kCPU);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(
eigenvectors.scalar_type(), "linalg_eigh_cpu", [&] {
apply_magma_eigh<scalar_t>(
eigenvalues_cpu, eigenvectors, infos, upper, compute_eigenvectors);
});
// Transfer the results computed by MAGMA from CPU to GPU
eigenvalues.copy_(eigenvalues_cpu);
} else { // eigenvectors.size(-1) <= 128
// transfer to CPU, compute the result there, and copy it back to GPU;
// this is faster than going through MAGMA, which does the same
Tensor eigenvalues_cpu = at::empty_like(eigenvalues, eigenvalues.options().device(kCPU));
if (compute_eigenvectors) {
Tensor eigenvectors_cpu = at::empty_like(eigenvectors, eigenvectors.options().device(kCPU));
at::linalg_eigh_out(eigenvalues_cpu, eigenvectors_cpu, eigenvectors.to(kCPU), upper ? "U" : "L");
eigenvectors.copy_(eigenvectors_cpu);
} else {
at::linalg_eigvalsh_out(eigenvalues_cpu, eigenvectors.to(kCPU), upper ? "U" : "L");
}
eigenvalues.copy_(eigenvalues_cpu);
}
}
void linalg_eigh_kernel(Tensor& eigenvalues, Tensor& eigenvectors, Tensor& infos, bool upper, bool compute_eigenvectors) {
#if defined(USE_CUSOLVER)
linalg_eigh_cusolver(eigenvalues, eigenvectors, infos, upper, compute_eigenvectors);
#else
linalg_eigh_magma(eigenvalues, eigenvectors, infos, upper, compute_eigenvectors);
#endif
}
REGISTER_DISPATCH(linalg_eigh_stub, &linalg_eigh_kernel);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ eig ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// magmaEig uses a hybrid CPU-GPU algorithm, which takes and returns CPU
// memory. So, we accept a GPU tensor, copy it to CPU memory, and later copy
// the returned values from CPU to GPU. See also magmaSymeig, which uses a
// similar approach.
template <typename scalar_t>
static void apply_eig(const Tensor& self, bool eigenvectors, Tensor& out_eigvals, Tensor& out_eigvecs,
int64_t *info_ptr) {
#ifndef USE_MAGMA
TORCH_CHECK(false, "Calling torch.eig on a CUDA tensor requires compiling PyTorch with MAGMA. "
"Either transfer the tensor to the CPU before calling torch.eig or recompile with MAGMA.");
#else
TORCH_INTERNAL_ASSERT(self.device() == at::kCPU, "Internal error: apply_eig needs a CPU tensor");
using value_t = typename c10::scalar_value_type<scalar_t>::type;
magma_vec_t jobvr = eigenvectors ? MagmaVec : MagmaNoVec;
magma_int_t n = magma_int_cast(self.size(-1), "n");
auto self_data = self.data_ptr<scalar_t>();
auto out_eigvals_data = out_eigvals.data_ptr<scalar_t>();
scalar_t *wr = out_eigvals_data;
scalar_t *vr_data = NULL;
magma_int_t ldvr = 1;
if (jobvr == MagmaVec)
{
vr_data = out_eigvecs.data_ptr<scalar_t>();
ldvr = n;
}
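// For complex inputs geev needs a real-valued workspace of 2*n entries; real types pass nullptr and ignore it.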
value_t *rwork_data = nullptr;
if (isComplexType(at::typeMetaToScalarType(self.dtype()))) {
ALLOCATE_ARRAY(rwork_data, value_t, n*2);
}
if (n > 0) {
// call magmaEig once to get the optimal size of work_data
scalar_t wkopt;
magma_int_t info;
magmaEig<scalar_t, value_t>(MagmaNoVec, jobvr, n, self_data, n, wr, NULL, 1, vr_data, ldvr, &wkopt, -1, rwork_data, &info);
magma_int_t lwork = static_cast<magma_int_t>(real_impl<scalar_t, value_t>(wkopt));
// call it a 2nd time to do the actual work
scalar_t *work_data = nullptr;
ALLOCATE_ARRAY(work_data, scalar_t, lwork);
magmaEig<scalar_t, value_t>(MagmaNoVec, jobvr, n, self_data, n, wr, NULL, 1, vr_data, ldvr, work_data, lwork, rwork_data, &info);
*info_ptr = info;
}
#endif
}
/*
* Internal helper; like eig_cuda but:
* 1. assumes that self is a square matrix of side "n"
* 2. returns CPU tensors (because this is what magmaEig returns), which will be copied to GPU memory
* by the caller
*/
std::tuple<Tensor, Tensor> eig_kernel_impl(const Tensor& self, bool& eigenvectors) {
int64_t n = self.size(-1);
// copy self to pinned CPU memory
auto self_working_copy = at::empty_strided(
{n, n}, // square matrix
{1, n}, // column-ordered, as magmaEig expects
at::TensorOptions(at::kCPU).dtype(self.dtype()).pinned_memory(true));
self_working_copy.copy_(self);
// tensors holding the results. We use empty_strided to make them column-ordered
auto options = self.options().device(at::kCPU).memory_format(LEGACY_CONTIGUOUS_MEMORY_FORMAT);
Tensor out_eigvals;
if (isComplexType(at::typeMetaToScalarType(self.dtype()))) {
out_eigvals = at::empty({n}, options);
} else {
out_eigvals = at::empty_strided({n, 2}, {1, n}, options);
}
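// For real input types torch.eig returns eigenvalues as an (n, 2) tensor of (real, imaginary) pairs,
// hence the two-column allocation in the real branch above.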
auto out_eigvecs = eigenvectors
? at::empty_strided({n, n}, {1, n}, options)
: Tensor();
int64_t info;
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "eig_cuda", [&]{
apply_eig<scalar_t>(self_working_copy, eigenvectors, out_eigvals, out_eigvecs, &info);
});
singleCheckErrors(info, "eig_cuda");
return std::tuple<Tensor, Tensor>(out_eigvals, out_eigvecs);
}
REGISTER_DISPATCH(eig_stub, &eig_kernel_impl);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ linalg_eig ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/*
Computes the eigenvalues and eigenvectors of the n-by-n matrix 'input'.
This is an in-place routine; the content of 'input', 'values' and 'vectors' is overwritten.
'infos' is an int Tensor containing error codes for each matrix in the batched input.
For more information see MAGMA's documentation for the GEEV routine.
*/
template <typename scalar_t>
void apply_linalg_eig(Tensor& values, Tensor& vectors, Tensor& input, Tensor& infos, bool compute_eigenvectors) {
#ifndef USE_MAGMA
TORCH_CHECK(false, "Calling torch.linalg.eig on a CUDA tensor requires compiling PyTorch with MAGMA. "
"Either transfer the tensor to the CPU before calling torch.linalg.eig or recompile with MAGMA.");
#else
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(input.device() == at::kCPU);
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(values.device() == at::kCPU);
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(infos.device() == at::kCPU);
if (compute_eigenvectors) {
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(vectors.device() == at::kCPU);
}
using value_t = typename c10::scalar_value_type<scalar_t>::type;
magma_vec_t jobvr = compute_eigenvectors ? MagmaVec : MagmaNoVec;
magma_vec_t jobvl = MagmaNoVec; // only right eigenvectors are computed
magma_int_t n = magma_int_cast(input.size(-1), "n");
auto lda = std::max<magma_int_t>(1, n);
auto batch_size = batchCount(input);
auto input_matrix_stride = matrixStride(input);
auto values_stride = values.size(-1);
auto input_data = input.data_ptr<scalar_t>();
auto values_data = values.data_ptr<scalar_t>();
auto infos_data = infos.data_ptr<magma_int_t>();
auto rvectors_data = compute_eigenvectors ? vectors.data_ptr<scalar_t>() : nullptr;
scalar_t* lvectors_data = nullptr; // only right eigenvectors are computed
int64_t ldvr = compute_eigenvectors ? lda : 1;
int64_t ldvl = 1;
Tensor rwork;
value_t* rwork_data = nullptr;
if (input.is_complex()) {
ScalarType real_dtype = toValueType(input.scalar_type());
rwork = at::empty({lda * 2}, input.options().dtype(real_dtype));
rwork_data = rwork.data_ptr<value_t>();
}
// call magmaEig once to get the optimal size of work_data
scalar_t work_query;
magmaEig<scalar_t, value_t>(jobvl, jobvr, n, input_data, lda, values_data,
lvectors_data, ldvl, rvectors_data, ldvr, &work_query, -1, rwork_data, &infos_data[0]);
magma_int_t lwork = std::max<magma_int_t>(1, static_cast<magma_int_t>(real_impl<scalar_t, value_t>(work_query)));
Tensor work = at::empty({lwork}, input.dtype());
auto work_data = work.data_ptr<scalar_t>();
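// Decompose each matrix in the batch, reusing the query-sized workspace allocated above.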
for (auto i = decltype(batch_size){0}; i < batch_size; i++) {
scalar_t* input_working_ptr = &input_data[i * input_matrix_stride];
scalar_t* values_working_ptr = &values_data[i * values_stride];
scalar_t* rvectors_working_ptr = compute_eigenvectors ? &rvectors_data[i * input_matrix_stride] : nullptr;
int* info_working_ptr = &infos_data[i];
magmaEig<scalar_t, value_t>(jobvl, jobvr, n, input_working_ptr, lda, values_working_ptr,
lvectors_data, ldvl, rvectors_working_ptr, ldvr, work_data, lwork, rwork_data, info_working_ptr);
}
#endif
}
// This is a type dispatching helper function for 'apply_linalg_eig'
void linalg_eig_kernel(Tensor& eigenvalues, Tensor& eigenvectors, Tensor& infos, const Tensor& input, bool compute_eigenvectors) {
// This function calculates the non-symmetric eigendecomposition in-place;
// tensors should be in batched column major memory format.
// The content of eigenvalues, eigenvectors and infos is overwritten by 'apply_linalg_eig'.
// apply_linalg_eig modifies the provided input matrix in-place, therefore we need a copy.
// MAGMA doesn't have a GPU interface for the eigendecomposition, which forces us to transfer 'input' to CPU.
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(input.is_cuda());
Tensor input_working_copy = at::empty(input.sizes(), input.options().device(kCPU));
input_working_copy.transpose_(-2, -1); // make input_working_copy have a Fortran-contiguous memory layout
input_working_copy.copy_(input);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(input.scalar_type(), "linalg_eig_out_cuda", [&]{
apply_linalg_eig<scalar_t>(eigenvalues, eigenvectors, input_working_copy, infos, compute_eigenvectors);
});
}
REGISTER_DISPATCH(linalg_eig_stub, &linalg_eig_kernel);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ svd ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template<typename scalar_t>
static void apply_svd(Tensor& self, Tensor& U, Tensor& S, Tensor& VT,
char jobchar, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("svd: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
using value_t = typename c10::scalar_value_type<scalar_t>::type;
auto self_data = self.data_ptr<scalar_t>();
auto U_data = U.data_ptr<scalar_t>();
auto S_data = S.data_ptr<value_t>();
auto VT_data = VT.data_ptr<scalar_t>();
auto self_stride = matrixStride(self);
auto U_stride = matrixStride(U);
auto S_stride = S.size(-1);
auto VT_stride = matrixStride(VT);
auto batchsize = batchCount(self);
magma_vec_t jobz = jobchar == 'A' ? MagmaAllVec : (jobchar == 'S' ? MagmaSomeVec : MagmaNoVec);
magma_int_t m = magma_int_cast(self.size(-2), "m");
magma_int_t n = magma_int_cast(self.size(-1), "n");
auto lda = std::max<magma_int_t>(1, m);
auto ldvt = std::max<magma_int_t>(1, n);
auto mn = ::min(m, n);
c10::Storage storage_rwork;
value_t* rwork = nullptr;
magma_int_t* iwork;
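// gesdd requires an integer workspace of 8 * min(m, n) elements.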
ALLOCATE_ARRAY(iwork, magma_int_t, 8 * mn);
if (isComplexType(at::typeMetaToScalarType(self.dtype()))) {
auto lrwork = computeLRWorkDim(jobchar, m, n);
storage_rwork = pin_memory<value_t>(lrwork);
rwork = static_cast<value_t*>(storage_rwork.data());
}
magma_int_t info = 0;
// Run the workspace query once first to get the optimal work size.
// Since we deal with batches of matrices with the same dimensions, doing this outside
// the loop saves (batch_size - 1) workspace queries which would provide the same result
// and (batch_size - 1) calls to allocate and deallocate workspace using at::empty()
magma_int_t lwork = -1;
scalar_t wkopt = 1; // MAGMA might not set the value for the optimal workspace, therefore use 1 as the default value
magmaSvd<scalar_t, value_t>(jobz, m, n, self_data, lda, S_data, U_data, lda, VT_data, ldvt, &wkopt, lwork, rwork, iwork, &info);
lwork = magma_int_cast(real_impl<scalar_t, value_t>(wkopt), "work_size");
scalar_t* work;
ALLOCATE_ARRAY(work, scalar_t, lwork);
for (int64_t i = 0; i < batchsize; i++) {
scalar_t* self_working_ptr = &self_data[i * self_stride];
value_t* S_working_ptr = &S_data[i * S_stride];
scalar_t* U_working_ptr = &U_data[i * U_stride];
scalar_t* VT_working_ptr = &VT_data[i * VT_stride];
// Compute S, U (optionally), VT (optionally)
magmaSvd<scalar_t, value_t>(jobz, m, n, self_working_ptr, lda,
S_working_ptr, U_working_ptr, lda, VT_working_ptr, ldvt, work, lwork, rwork, iwork, &info);
infos[i] = info;
if (info != 0) {
return;
}
}
#endif
}
std::tuple<Tensor, Tensor, Tensor> _svd_helper_cuda_legacy(const Tensor& self, bool some, bool compute_uv) {
std::vector<int64_t> infos(batchCount(self), 0);
int64_t m = self.size(-2), n = self.size(-1);
int64_t k = ::min(m, n);
char jobchar = compute_uv ? (some ? 'S' : 'A') : 'N';
Tensor U_working_copy, S_working_copy, VT_working_copy;
std::tie(U_working_copy, S_working_copy, VT_working_copy) = _create_U_S_VT(self, some, compute_uv);
// The input matrix, U, S and VT have to reside in pinned memory.
// Additionally, the input and U have to be in column major format.
// _create_U_S_VT takes care of a part of these requirements (for U, S and VT)
// For the input matrix, these requirements are taken care of below.
// Specify strides
auto self_col_major_strides = at::detail::defaultStrides(self.sizes());
self_col_major_strides[self.dim() - 2] = 1;
self_col_major_strides[self.dim() - 1] = m;
// Create strided tensor in pinned memory
auto self_working_copy = at::empty_strided(self.sizes(), self_col_major_strides,
at::TensorOptions(at::kCPU).dtype(self.dtype()).pinned_memory(true));
self_working_copy.copy_(self);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "svd_cuda", [&] {
apply_svd<scalar_t>(self_working_copy, U_working_copy, S_working_copy, VT_working_copy, jobchar, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "svd_cuda");
} else {
singleCheckErrors(infos[0], "svd_cuda");
}
U_working_copy = same_stride_to(U_working_copy, self.options());
S_working_copy = same_stride_to(S_working_copy, S_working_copy.options().device(self.device()));
VT_working_copy = same_stride_to(VT_working_copy, self.options());
if (!compute_uv) {
VT_working_copy.zero_();
U_working_copy.zero_();
}
if (some) {
VT_working_copy = VT_working_copy.narrow(-2, 0, k);
}
// so far we have computed VT, but torch.svd returns V instead. Adjust accordingly.
// Note that the 'apply_svd' routine returns VT = V^T (for real inputs) or VT = V^H (for complex inputs), not V.
VT_working_copy = VT_working_copy.conj();
VT_working_copy.transpose_(-2, -1);
return std::make_tuple(U_working_copy, S_working_copy, VT_working_copy);
}
std::tuple<Tensor, Tensor, Tensor> _svd_helper_cuda(const Tensor& self, bool some, bool compute_uv) {
#ifdef USE_CUSOLVER
return _svd_helper_cuda_lib(self, some, compute_uv);
#else
return _svd_helper_cuda_legacy(self, some, compute_uv);
#endif
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lu_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_lu_solve(Tensor& b, const Tensor& lu, const Tensor& pivots, int64_t& info) {
#ifndef USE_MAGMA
AT_ERROR("lu_solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto b_data = b.data_ptr<scalar_t>();
auto lu_data = lu.data_ptr<scalar_t>();
auto n = lu.size(-2);
auto nrhs = b.size(-1);
int info_tmp = 0;
if (b.dim() == 2) {
Tensor pivots_tmp = pivots.cpu();
magmaLuSolve<scalar_t>(n, nrhs, lu_data, n, pivots_tmp.data_ptr<magma_int_t>(), b_data, n, &info_tmp);
info = info_tmp;
} else {
auto pivots_data = pivots.data_ptr<magma_int_t>();
auto b_stride = matrixStride(b);
auto lu_stride = matrixStride(lu);
auto pivots_stride = pivots.size(-1);
magma_int_t batch_size = magma_int_cast(batchCount(b), "batchCount");
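// The batched MAGMA interface takes arrays of per-matrix pointers; build them below.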
magma_int_t** pivots_array;
scalar_t** lu_array;
scalar_t** b_array;
ALLOCATE_ARRAY(pivots_array, magma_int_t*, batch_size);
ALLOCATE_ARRAY(lu_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
for (int64_t i = 0; i < batch_size; i++) {
pivots_array[i] = &pivots_data[i * pivots_stride];
b_array[i] = &b_data[i * b_stride];
lu_array[i] = &lu_data[i * lu_stride];
}
MAGMAQueue magma_queue(b.get_device());
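// MAGMA's batched routines accept a limited number of matrices per call, so process the batch in
// chunks of at most batch_limit.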
constexpr int64_t batch_limit = 65535;
// Compute as many batches of 65535 as possible
// The number of "mini"-batches is floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** lu_array_cur = &lu_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magma_int_t** pivots_array_cur = &pivots_array[mini_idx];
magmaLuSolveBatched<scalar_t>(
n, nrhs, lu_array_cur, n, pivots_array_cur, b_array_cur, n,
info_tmp, batch_limit, magma_queue);
if (info_tmp != 0) {
break;
}
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0 && info_tmp == 0) {
magmaLuSolveBatched<scalar_t>(
n, nrhs, &lu_array[mini_idx], n, &pivots_array[mini_idx], &b_array[mini_idx], n,
info_tmp, batch_size % batch_limit, magma_queue);
}
info = info_tmp;
}
#endif
}
Tensor _lu_solve_helper_cuda(const Tensor& self, const Tensor& LU_data, const Tensor& LU_pivots) {
int64_t info = 0;
auto self_working_copy = cloneBatchedColumnMajor(self);
auto LU_data_working_copy = cloneBatchedColumnMajor(LU_data);
auto LU_pivots_working_copy = LU_pivots.is_contiguous() ? LU_pivots : LU_pivots.contiguous();
if (self.numel() == 0 || LU_data.numel() == 0) {
return at::zeros_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
}
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "lu_solve_cuda", [&]{
apply_lu_solve<scalar_t>(self_working_copy, LU_data_working_copy, LU_pivots_working_copy, info);
});
TORCH_CHECK(info == 0, "MAGMA lu_solve : invalid argument: ", -info);
return self_working_copy;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lstsq ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_gels(const Tensor& a, Tensor& b, Tensor& infos) {
#ifndef USE_MAGMA
TORCH_CHECK(false, "torch.linalg.lstsq: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto trans = MagmaNoTrans;
auto m = magma_int_cast(a.size(-2), "m");
auto n = magma_int_cast(a.size(-1), "n");
TORCH_CHECK(
m >= n,
"torch.linalg.lstsq: only overdetermined systems (input.size(-2) >= input.size(-1)) are allowed on CUDA");
auto nrhs = magma_int_cast(b.size(-1), "nrhs");
auto ldda = std::max<magma_int_t>(1, m);
auto lddb = std::max<magma_int_t>(1, ::max(m, n));
auto nb = magmaGeqrfOptimalBlocksize<scalar_t>(m, n);
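// Host workspace size for MAGMA's gels, expressed in terms of the QR block size nb.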
auto lwork = (m - n + nb) * (nrhs + nb) + nrhs * nb;
Tensor hwork = at::empty({static_cast<int64_t>(lwork)}, a.scalar_type());
auto* hwork_ptr = hwork.data_ptr<scalar_t>();
// MAGMA requires infos tensor to live on CPU
infos = infos.to(at::kCPU);
auto infos_data = infos.data_ptr<magma_int_t>();
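// Iterate over the (possibly broadcast) batch, solving each least-squares problem with MAGMA's gels
// and recording the per-matrix status in infos.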
batch_iterator_with_broadcasting<scalar_t>(a, b,
[&](scalar_t* a_working_ptr, scalar_t* b_working_ptr,
int64_t a_linear_batch_idx) {
magma_int_t* infos_working_ptr = &infos_data[a_linear_batch_idx];
magmaGels<scalar_t>(trans, m, n, nrhs,
a_working_ptr, ldda, b_working_ptr, lddb,
hwork_ptr, lwork, infos_working_ptr);
}
);
#endif
}
void lstsq_kernel(const Tensor& a, Tensor& b, Tensor& rank, Tensor& singular_values, Tensor& infos, double rcond, std::string driver_name) {
(void)rank; // unused
(void)singular_values; // unused
(void)rcond; // unused
(void)driver_name; // unused
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(a.scalar_type(), "linalg_lstsq_cuda", [&] {
apply_gels<scalar_t>(a, b, infos);
});
}
REGISTER_DISPATCH(lstsq_stub, &lstsq_kernel);
}} // namespace at::native
#undef ALLOCATE_ARRAY
| 7a0dc242a5d661a802011cd41fe34cf06d5b9d04.cu | #include <ATen/Context.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/Dispatch.h>
#include <ATen/NativeFunctions.h>
#include <ATen/cuda/PinnedMemoryAllocator.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/native/LinearAlgebraUtils.h>
#include <ATen/native/cuda/MiscUtils.h>
#include <ATen/native/Resize.h>
#include <ATen/native/BatchLinearAlgebra.h>
#include <ATen/native/cuda/BatchLinearAlgebraLib.h>
#include <ATen/native/cpu/zmath.h>
#include <THC/THC.h> // for USE_MAGMA
#ifdef USE_MAGMA
#include <magma_types.h>
#include <magma_v2.h>
const bool use_magma_ = true;
#else
const bool use_magma_ = false;
#endif
namespace at {
namespace native {
#ifdef USE_MAGMA
template<class scalar_t>
void magmaSolve(
magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda,
magma_int_t* ipiv, scalar_t* dB, magma_int_t lddb, magma_int_t* info);
template<class scalar_t>
void magmaSolveBatched(
magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, scalar_t** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaLu(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info);
template<class scalar_t>
void magmaLuBatched(
magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaLuNoPiv(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
magma_int_t* info);
template<class scalar_t>
void magmaLuNoPivBatched(
magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
inline magma_int_t magmaGetriOptimalBlocksize(magma_int_t n);
template<class scalar_t>
void magmaGetri(
magma_int_t n, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv, scalar_t* dwork,
magma_int_t lwork, magma_int_t* info);
template<class scalar_t>
void magmaGetriBatched(
magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, scalar_t** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaCholeskySolve(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda,
scalar_t* dB, magma_int_t lddb, magma_int_t* info);
template<class scalar_t>
void magmaCholeskySolveBatched(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda,
scalar_t** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaCholesky(
magma_uplo_t uplo, magma_int_t n, scalar_t* dA,
magma_int_t ldda, magma_int_t* info);
template<class scalar_t>
void magmaCholeskyBatched(
magma_uplo_t uplo, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaTriangularSolveBatched(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
scalar_t** dA_array, magma_int_t ldda, scalar_t** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue);
template<class scalar_t>
inline magma_int_t magmaGeqrfOptimalBlocksize(magma_int_t m, magma_int_t n);
template<class scalar_t>
void magmaGeqrf(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
scalar_t* tau, scalar_t* dT, magma_int_t* info, bool is_v2);
template<class scalar_t>
void magmaOrgqr(
magma_int_t m, magma_int_t n, magma_int_t k, scalar_t* dA,
magma_int_t ldda, scalar_t* tau, scalar_t* dT, magma_int_t nb, magma_int_t* info);
template<class scalar_t, class value_t=scalar_t>
void magmaSyevd(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, scalar_t* dA, magma_int_t ldda,
value_t* w, scalar_t* wA, magma_int_t ldwa, scalar_t* work, magma_int_t lwork, value_t* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info);
template<class scalar_t, class value_t=scalar_t>
void magmaEig(
magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n, scalar_t *A, magma_int_t lda,
scalar_t *w, scalar_t *VL, magma_int_t ldvl,
scalar_t *VR, magma_int_t ldvr, scalar_t *work, magma_int_t lwork,
value_t *rwork,
magma_int_t *info);
template<class scalar_t, class value_t=scalar_t>
void magmaSvd(
magma_vec_t jobz, magma_int_t m, magma_int_t n, scalar_t* A,
magma_int_t lda, value_t* s, scalar_t* U, magma_int_t ldu,
scalar_t* VT, magma_int_t ldvt, scalar_t* work, magma_int_t lwork,
value_t* rwork,
magma_int_t* iwork, magma_int_t* info);
template<class scalar_t>
void magmaLuSolve(
magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv,
scalar_t* dB, magma_int_t lddb, magma_int_t* info);
template<class scalar_t>
void magmaLuSolveBatched(
magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
scalar_t** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaGels(
magma_trans_t trans, magma_int_t m, magma_int_t n, magma_int_t nrhs,
scalar_t* dA, magma_int_t ldda, scalar_t* dB, magma_int_t lddb,
scalar_t* hwork, magma_int_t lwork, magma_int_t* info);
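// Explicit specializations of the wrappers declared above; each dispatches to the corresponding
// MAGMA routine (s/d/c/z prefixes for float, double, complex<float> and complex<double> respectively).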
template<>
void magmaSolve<double>(
magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda,
magma_int_t* ipiv, double* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSolve<float>(
magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda,
magma_int_t* ipiv, float* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSolve<c10::complex<double>>(
magma_int_t n, magma_int_t nrhs, c10::complex<double>* dA, magma_int_t ldda,
magma_int_t* ipiv, c10::complex<double>* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgesv_gpu(n, nrhs,
reinterpret_cast<magmaDoubleComplex*>(dA), ldda, ipiv,
reinterpret_cast<magmaDoubleComplex*>(dB), lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSolve<c10::complex<float>>(
magma_int_t n, magma_int_t nrhs, c10::complex<float>* dA, magma_int_t ldda,
magma_int_t* ipiv, c10::complex<float>* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgesv_gpu(n, nrhs,
reinterpret_cast<magmaFloatComplex*>(dA), ldda, ipiv,
reinterpret_cast<magmaFloatComplex*>(dB), lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSolveBatched<double>(
magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, double** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_dgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSolveBatched<float>(
magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, float** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_sgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSolveBatched<c10::complex<double>>(
magma_int_t n, magma_int_t nrhs, c10::complex<double>** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, c10::complex<double>** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_zgesv_batched(n, nrhs,
reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, dipiv_array,
reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, dinfo_array, batch_count, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSolveBatched<c10::complex<float>>(
magma_int_t n, magma_int_t nrhs, c10::complex<float>** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, c10::complex<float>** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_cgesv_batched(n, nrhs,
reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, dipiv_array,
reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, dinfo_array, batch_count, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLu<double>(
magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgetrf_gpu(m, n, dA, ldda, ipiv, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLu<float>(
magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgetrf_gpu(m, n, dA, ldda, ipiv, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLu<c10::complex<double>>(
magma_int_t m, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgetrf_gpu(m, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, ipiv, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLu<c10::complex<float>>(
magma_int_t m, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgetrf_gpu(m, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, ipiv, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuBatched<double>(
magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_dgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuBatched<float>(
magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_sgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuBatched<c10::complex<double>>(
magma_int_t m, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_zgetrf_batched(m, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuBatched<c10::complex<float>>(
magma_int_t m, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_cgetrf_batched(m, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuNoPiv<double>(
magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgetrf_nopiv_gpu(m, n, dA, ldda, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuNoPiv<float>(
magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgetrf_nopiv_gpu(m, n, dA, ldda, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuNoPiv<c10::complex<double>>(
magma_int_t m, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgetrf_nopiv_gpu(m, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuNoPiv<c10::complex<float>>(
magma_int_t m, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgetrf_nopiv_gpu(m, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuNoPivBatched<double>(
magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dgetrf_nopiv_batched(m, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuNoPivBatched<float>(
magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_sgetrf_nopiv_batched(m, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuNoPivBatched<c10::complex<double>>(
magma_int_t m, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_zgetrf_nopiv_batched(m, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuNoPivBatched<c10::complex<float>>(
magma_int_t m, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_cgetrf_nopiv_batched(m, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
inline magma_int_t magmaGetriOptimalBlocksize<double>(magma_int_t n) {
return magma_get_dgetri_nb(n);
}
template<>
inline magma_int_t magmaGetriOptimalBlocksize<float>(magma_int_t n) {
return magma_get_sgetri_nb(n);
}
template <>
inline magma_int_t magmaGetriOptimalBlocksize<c10::complex<double>>(
magma_int_t n) {
return magma_get_zgetri_nb(n);
}
template <>
inline magma_int_t magmaGetriOptimalBlocksize<c10::complex<float>>(
magma_int_t n) {
return magma_get_cgetri_nb(n);
}
template<>
void magmaGetri<double>(
magma_int_t n, double* dA, magma_int_t ldda, magma_int_t* ipiv, double* dwork,
magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgetri_gpu(n, dA, ldda, ipiv, dwork, lwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaGetri<float>(
magma_int_t n, float* dA, magma_int_t ldda, magma_int_t* ipiv, float* dwork,
magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgetri_gpu(n, dA, ldda, ipiv, dwork, lwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template <>
void magmaGetri<c10::complex<double>>(
magma_int_t n,
c10::complex<double>* dA,
magma_int_t ldda,
magma_int_t* ipiv,
c10::complex<double>* dwork,
magma_int_t lwork,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgetri_gpu(
n,
reinterpret_cast<magmaDoubleComplex*>(dA),
ldda,
ipiv,
reinterpret_cast<magmaDoubleComplex*>(dwork),
lwork,
info);
AT_CUDA_CHECK(cudaGetLastError());
}
template <>
void magmaGetri<c10::complex<float>>(
magma_int_t n,
c10::complex<float>* dA,
magma_int_t ldda,
magma_int_t* ipiv,
c10::complex<float>* dwork,
magma_int_t lwork,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgetri_gpu(
n,
reinterpret_cast<magmaFloatComplex*>(dA),
ldda,
ipiv,
reinterpret_cast<magmaFloatComplex*>(dwork),
lwork,
info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaGetriBatched<double>(
magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, double** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaGetriBatched<float>(
magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, float** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_sgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template <>
void magmaGetriBatched<c10::complex<double>>(
magma_int_t n,
c10::complex<double>** dA_array,
magma_int_t ldda,
magma_int_t** ipiv_array,
c10::complex<double>** dinvA_array,
magma_int_t lddia,
magma_int_t* info_array,
magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_zgetri_outofplace_batched(
n,
reinterpret_cast<magmaDoubleComplex**>(dA_array),
ldda,
ipiv_array,
reinterpret_cast<magmaDoubleComplex**>(dinvA_array),
lddia,
info_array,
batchsize,
magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template <>
void magmaGetriBatched<c10::complex<float>>(
magma_int_t n,
c10::complex<float>** dA_array,
magma_int_t ldda,
magma_int_t** ipiv_array,
c10::complex<float>** dinvA_array,
magma_int_t lddia,
magma_int_t* info_array,
magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_cgetri_outofplace_batched(
n,
reinterpret_cast<magmaFloatComplex**>(dA_array),
ldda,
ipiv_array,
reinterpret_cast<magmaFloatComplex**>(dinvA_array),
lddia,
info_array,
batchsize,
magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskySolve<double>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda,
double* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dpotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskySolve<float>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda,
float* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_spotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskySolve<c10::complex<double>>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<double>* dA, magma_int_t ldda,
c10::complex<double>* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zpotrs_gpu(uplo, n, nrhs,
reinterpret_cast<magmaDoubleComplex*>(dA), ldda,
reinterpret_cast<magmaDoubleComplex*>(dB), lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskySolve<c10::complex<float>>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<float>* dA, magma_int_t ldda,
c10::complex<float>* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cpotrs_gpu(uplo, n, nrhs,
reinterpret_cast<magmaFloatComplex*>(dA), ldda,
reinterpret_cast<magmaFloatComplex*>(dB), lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskySolveBatched<double>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda,
double** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_dpotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskySolveBatched<float>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda,
float** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_spotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskySolveBatched<c10::complex<double>>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<double>** dA_array, magma_int_t ldda,
c10::complex<double>** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_zpotrs_batched(uplo, n, nrhs,
reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda,
reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskySolveBatched<c10::complex<float>>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<float>** dA_array, magma_int_t ldda,
c10::complex<float>** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_cpotrs_batched(uplo, n, nrhs,
reinterpret_cast<magmaFloatComplex**>(dA_array), ldda,
reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholesky<double>(
magma_uplo_t uplo, magma_int_t n, double* dA,
magma_int_t ldda, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dpotrf_gpu(uplo, n, dA, ldda, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholesky<float>(
magma_uplo_t uplo, magma_int_t n, float* dA,
magma_int_t ldda, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_spotrf_gpu(uplo, n, dA, ldda, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholesky<c10::complex<double>>(
magma_uplo_t uplo, magma_int_t n, c10::complex<double>* dA,
magma_int_t ldda, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zpotrf_gpu(uplo, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholesky<c10::complex<float>>(
magma_uplo_t uplo, magma_int_t n, c10::complex<float>* dA,
magma_int_t ldda, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cpotrf_gpu(uplo, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskyBatched<double>(
magma_uplo_t uplo, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dpotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskyBatched<float>(
magma_uplo_t uplo, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_spotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskyBatched<c10::complex<double>>(
magma_uplo_t uplo, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_zpotrf_batched(uplo, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskyBatched<c10::complex<float>>(
magma_uplo_t uplo, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_cpotrf_batched(uplo, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaTriangularSolveBatched<double>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
double** dA_array, magma_int_t ldda, double** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magmablas_dtrsm_batched(MagmaLeft, uplo, trans, diag, m, n, 1, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaTriangularSolveBatched<float>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
float** dA_array, magma_int_t ldda, float** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magmablas_strsm_batched(MagmaLeft, uplo, trans, diag, m, n, 1, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaTriangularSolveBatched<c10::complex<double>>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
c10::complex<double>** dA_array, magma_int_t ldda, c10::complex<double>** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magmaDoubleComplex alpha({1, 0});
magmablas_ztrsm_batched(MagmaLeft, uplo, trans, diag, m, n, alpha,
reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda,
reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaTriangularSolveBatched<c10::complex<float>>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
c10::complex<float>** dA_array, magma_int_t ldda, c10::complex<float>** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magmaFloatComplex alpha({1, 0});
magmablas_ctrsm_batched(MagmaLeft, uplo, trans, diag, m, n, alpha,
reinterpret_cast<magmaFloatComplex**>(dA_array), ldda,
reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
inline magma_int_t magmaGeqrfOptimalBlocksize<double>(magma_int_t m, magma_int_t n) {
return magma_get_dgeqrf_nb(m, n);
}
template<>
inline magma_int_t magmaGeqrfOptimalBlocksize<float>(magma_int_t m, magma_int_t n) {
return magma_get_sgeqrf_nb(m, n);
}
template <>
inline magma_int_t magmaGeqrfOptimalBlocksize<c10::complex<double>>(
magma_int_t m,
magma_int_t n) {
return magma_get_zgeqrf_nb(m, n);
}
template <>
inline magma_int_t magmaGeqrfOptimalBlocksize<c10::complex<float>>(
magma_int_t m,
magma_int_t n) {
return magma_get_cgeqrf_nb(m, n);
}
template<>
void magmaGeqrf<double>(
magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
double* tau, double* dT, magma_int_t* info, bool is_v2) {
MagmaStreamSyncGuard guard;
if (!is_v2) {
magma_dgeqrf_gpu(m, n, dA, ldda, tau, dT, info);
} else {
magma_dgeqrf2_gpu(m, n, dA, ldda, tau, info);
}
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaGeqrf<float>(
magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
float* tau, float* dT, magma_int_t* info, bool is_v2) {
MagmaStreamSyncGuard guard;
if (!is_v2) {
magma_sgeqrf_gpu(m, n, dA, ldda, tau, dT, info);
} else {
magma_sgeqrf2_gpu(m, n, dA, ldda, tau, info);
}
AT_CUDA_CHECK(cudaGetLastError());
}
template <>
void magmaGeqrf<c10::complex<double>>(
magma_int_t m,
magma_int_t n,
c10::complex<double>* dA,
magma_int_t ldda,
c10::complex<double>* tau,
c10::complex<double>* dT,
magma_int_t* info,
bool is_v2) {
MagmaStreamSyncGuard guard;
if (!is_v2) {
magma_zgeqrf_gpu(
m,
n,
reinterpret_cast<magmaDoubleComplex*>(dA),
ldda,
reinterpret_cast<magmaDoubleComplex*>(tau),
reinterpret_cast<magmaDoubleComplex*>(dT),
info);
} else {
magma_zgeqrf2_gpu(
m,
n,
reinterpret_cast<magmaDoubleComplex*>(dA),
ldda,
reinterpret_cast<magmaDoubleComplex*>(tau),
info);
}
AT_CUDA_CHECK(cudaGetLastError());
}
template <>
void magmaGeqrf<c10::complex<float>>(
magma_int_t m,
magma_int_t n,
c10::complex<float>* dA,
magma_int_t ldda,
c10::complex<float>* tau,
c10::complex<float>* dT,
magma_int_t* info,
bool is_v2) {
MagmaStreamSyncGuard guard;
if (!is_v2) {
magma_cgeqrf_gpu(
m,
n,
reinterpret_cast<magmaFloatComplex*>(dA),
ldda,
reinterpret_cast<magmaFloatComplex*>(tau),
reinterpret_cast<magmaFloatComplex*>(dT),
info);
} else {
magma_cgeqrf2_gpu(
m,
n,
reinterpret_cast<magmaFloatComplex*>(dA),
ldda,
reinterpret_cast<magmaFloatComplex*>(tau),
info);
}
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaOrgqr<double>(
magma_int_t m, magma_int_t n, magma_int_t k, double* dA, magma_int_t ldda,
double* tau, double* dT, magma_int_t nb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dorgqr_gpu(m, n, k, dA, ldda, tau, dT, nb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaOrgqr<float>(
magma_int_t m, magma_int_t n, magma_int_t k, float* dA, magma_int_t ldda,
float* tau, float* dT, magma_int_t nb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sorgqr_gpu(m, n, k, dA, ldda, tau, dT, nb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template <>
void magmaOrgqr<c10::complex<double>>(
magma_int_t m,
magma_int_t n,
magma_int_t k,
c10::complex<double>* dA,
magma_int_t ldda,
c10::complex<double>* tau,
c10::complex<double>* dT,
magma_int_t nb,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zungqr_gpu(
m,
n,
k,
reinterpret_cast<magmaDoubleComplex*>(dA),
ldda,
reinterpret_cast<magmaDoubleComplex*>(tau),
reinterpret_cast<magmaDoubleComplex*>(dT),
nb,
info);
AT_CUDA_CHECK(cudaGetLastError());
}
template <>
void magmaOrgqr<c10::complex<float>>(
magma_int_t m,
magma_int_t n,
magma_int_t k,
c10::complex<float>* dA,
magma_int_t ldda,
c10::complex<float>* tau,
c10::complex<float>* dT,
magma_int_t nb,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cungqr_gpu(
m,
n,
k,
reinterpret_cast<magmaFloatComplex*>(dA),
ldda,
reinterpret_cast<magmaFloatComplex*>(tau),
reinterpret_cast<magmaFloatComplex*>(dT),
nb,
info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSyevd<double>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, double* dA, magma_int_t ldda,
double* w, double* wA, magma_int_t ldwa, double* work, magma_int_t lwork, double* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
(void)rwork; // unused
(void)lrwork; // unused
MagmaStreamSyncGuard guard;
magma_dsyevd_gpu(jobz, uplo, n, dA, ldda, w, wA, ldwa, work, lwork, iwork, liwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSyevd<float>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, float* dA, magma_int_t ldda,
float* w, float* wA, magma_int_t ldwa, float* work, magma_int_t lwork, float* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
(void)rwork; // unused
(void)lrwork; // unused
MagmaStreamSyncGuard guard;
magma_ssyevd_gpu(jobz, uplo, n, dA, ldda, w, wA, ldwa, work, lwork, iwork, liwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSyevd<c10::complex<double>, double>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda,
double* w, c10::complex<double>* wA, magma_int_t ldwa, c10::complex<double>* work, magma_int_t lwork, double* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zheevd_gpu(
jobz, uplo, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, w, reinterpret_cast<magmaDoubleComplex*>(wA),
ldwa, reinterpret_cast<magmaDoubleComplex*>(work), lwork, rwork, lrwork, iwork, liwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSyevd<c10::complex<float>, float>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda,
float* w, c10::complex<float>* wA, magma_int_t ldwa, c10::complex<float>* work, magma_int_t lwork, float* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cheevd_gpu(
jobz, uplo, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, w, reinterpret_cast<magmaFloatComplex*>(wA),
ldwa, reinterpret_cast<magmaFloatComplex*>(work), lwork, rwork, lrwork, iwork, liwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaEig<double>(
magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n,
double *A, magma_int_t lda,
double *w,
double *VL, magma_int_t ldvl,
double *VR, magma_int_t ldvr,
double *work, magma_int_t lwork,
double *rwork,
magma_int_t *info) {
MagmaStreamSyncGuard guard;
// magma [sd]geev wants two separate output arrays: wr and wi for the real
// and imaginary parts
double *wr = w;
double *wi = w + n;
(void)rwork; // unused
magma_dgeev(jobvl, jobvr, n, A, lda, wr, wi, VL, ldvl, VR, ldvr, work, lwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaEig<float>(
magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n,
float *A, magma_int_t lda,
float *w,
float *VL, magma_int_t ldvl,
float *VR, magma_int_t ldvr,
float *work, magma_int_t lwork,
float *rwork,
magma_int_t *info) {
MagmaStreamSyncGuard guard;
float *wr = w;
float *wi = w + n;
(void)rwork; // unused
magma_sgeev(jobvl, jobvr, n, A, lda, wr, wi, VL, ldvl, VR, ldvr, work, lwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaEig<c10::complex<double>, double>(
magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n,
c10::complex<double> *A, magma_int_t lda,
c10::complex<double> *w,
c10::complex<double> *VL, magma_int_t ldvl,
c10::complex<double> *VR, magma_int_t ldvr,
c10::complex<double> *work, magma_int_t lwork,
double *rwork,
magma_int_t *info) {
MagmaStreamSyncGuard guard;
magma_zgeev(jobvl, jobvr, n,
reinterpret_cast<magmaDoubleComplex*>(A), lda,
reinterpret_cast<magmaDoubleComplex*>(w),
reinterpret_cast<magmaDoubleComplex*>(VL), ldvl,
reinterpret_cast<magmaDoubleComplex*>(VR), ldvr,
reinterpret_cast<magmaDoubleComplex*>(work), lwork,
rwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaEig<c10::complex<float>, float>(
magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n,
c10::complex<float> *A, magma_int_t lda,
c10::complex<float> *w,
c10::complex<float> *VL, magma_int_t ldvl,
c10::complex<float> *VR, magma_int_t ldvr,
c10::complex<float> *work, magma_int_t lwork,
float *rwork,
magma_int_t *info) {
MagmaStreamSyncGuard guard;
magma_cgeev(jobvl, jobvr, n,
reinterpret_cast<magmaFloatComplex*>(A), lda,
reinterpret_cast<magmaFloatComplex*>(w),
reinterpret_cast<magmaFloatComplex*>(VL), ldvl,
reinterpret_cast<magmaFloatComplex*>(VR), ldvr,
reinterpret_cast<magmaFloatComplex*>(work), lwork,
rwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSvd<double>(
magma_vec_t jobz, magma_int_t m, magma_int_t n, double* A,
magma_int_t lda, double* s, double* U, magma_int_t ldu,
double* VT, magma_int_t ldvt, double* work, magma_int_t lwork,
double *rwork, magma_int_t* iwork, magma_int_t* info) {
(void)rwork; // unused
MagmaStreamSyncGuard guard;
magma_dgesdd(jobz, m, n, A, lda, s, U, ldu, VT, ldvt, work, lwork, iwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSvd<float>(
magma_vec_t jobz, magma_int_t m, magma_int_t n, float* A,
magma_int_t lda, float* s, float* U, magma_int_t ldu,
float* VT, magma_int_t ldvt, float* work, magma_int_t lwork,
float* rwork, magma_int_t* iwork, magma_int_t* info) {
(void)rwork; // unused
MagmaStreamSyncGuard guard;
magma_sgesdd(jobz, m, n, A, lda, s, U, ldu, VT, ldvt, work, lwork, iwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSvd<c10::complex<float>, float>(
magma_vec_t jobz, magma_int_t m, magma_int_t n, c10::complex<float>* A,
magma_int_t lda, float* s, c10::complex<float>* U, magma_int_t ldu,
c10::complex<float>* VT, magma_int_t ldvt, c10::complex<float>* work, magma_int_t lwork,
float *rwork, magma_int_t* iwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgesdd(jobz, m, n, reinterpret_cast<magmaFloatComplex*>(A), lda, s,
reinterpret_cast<magmaFloatComplex*>(U), ldu,
reinterpret_cast<magmaFloatComplex*>(VT), ldvt,
reinterpret_cast<magmaFloatComplex*>(work), lwork,
rwork, iwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSvd<c10::complex<double>, double>(
magma_vec_t jobz, magma_int_t m, magma_int_t n, c10::complex<double>* A,
magma_int_t lda, double* s, c10::complex<double>* U, magma_int_t ldu,
c10::complex<double>* VT, magma_int_t ldvt, c10::complex<double>* work, magma_int_t lwork,
double *rwork, magma_int_t* iwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgesdd(jobz, m, n, reinterpret_cast<magmaDoubleComplex*>(A), lda, s,
reinterpret_cast<magmaDoubleComplex*>(U), ldu,
reinterpret_cast<magmaDoubleComplex*>(VT), ldvt,
reinterpret_cast<magmaDoubleComplex*>(work), lwork,
rwork, iwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuSolve<double>(
magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda, magma_int_t* ipiv,
double* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgetrs_gpu(MagmaNoTrans, n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuSolve<float>(
magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda, magma_int_t* ipiv,
float* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgetrs_gpu(MagmaNoTrans, n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuSolve<c10::complex<double>>(
magma_int_t n, magma_int_t nrhs, c10::complex<double>* dA, magma_int_t ldda, magma_int_t* ipiv,
c10::complex<double>* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgetrs_gpu(MagmaNoTrans, n, nrhs, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, ipiv, reinterpret_cast<magmaDoubleComplex*>(dB), lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuSolve<c10::complex<float>>(
magma_int_t n, magma_int_t nrhs, c10::complex<float>* dA, magma_int_t ldda, magma_int_t* ipiv,
c10::complex<float>* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgetrs_gpu(MagmaNoTrans, n, nrhs, reinterpret_cast<magmaFloatComplex*>(dA), ldda, ipiv, reinterpret_cast<magmaFloatComplex*>(dB), lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuSolveBatched<double>(
magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
double** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_dgetrs_batched(MagmaNoTrans, n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuSolveBatched<float>(
magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
float** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_sgetrs_batched(MagmaNoTrans, n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuSolveBatched<c10::complex<double>>(
magma_int_t n, magma_int_t nrhs, c10::complex<double>** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
c10::complex<double>** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_zgetrs_batched(MagmaNoTrans, n, nrhs, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, dipiv_array, reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuSolveBatched<c10::complex<float>>(
magma_int_t n, magma_int_t nrhs, c10::complex<float>** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
c10::complex<float>** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_cgetrs_batched(MagmaNoTrans, n, nrhs, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, dipiv_array, reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaGels<float>(
magma_trans_t trans, magma_int_t m, magma_int_t n, magma_int_t nrhs,
float* dA, magma_int_t ldda, float* dB, magma_int_t lddb,
float* hwork, magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgels_gpu(trans, m, n, nrhs,
dA, ldda, dB, lddb,
hwork, lwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaGels<double>(
magma_trans_t trans, magma_int_t m, magma_int_t n, magma_int_t nrhs,
double* dA, magma_int_t ldda, double* dB, magma_int_t lddb,
double* hwork, magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgels_gpu(trans, m, n, nrhs,
dA, ldda, dB, lddb,
hwork, lwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaGels<c10::complex<float>>(
magma_trans_t trans, magma_int_t m, magma_int_t n, magma_int_t nrhs,
c10::complex<float>* dA, magma_int_t ldda, c10::complex<float>* dB, magma_int_t lddb,
c10::complex<float>* hwork, magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgels_gpu(trans, m, n, nrhs,
reinterpret_cast<magmaFloatComplex*>(dA), ldda,
reinterpret_cast<magmaFloatComplex*>(dB), lddb,
reinterpret_cast<magmaFloatComplex*>(hwork), lwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaGels<c10::complex<double>>(
magma_trans_t trans, magma_int_t m, magma_int_t n, magma_int_t nrhs,
c10::complex<double>* dA, magma_int_t ldda, c10::complex<double>* dB, magma_int_t lddb,
c10::complex<double>* hwork, magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgels_gpu(trans, m, n, nrhs,
reinterpret_cast<magmaDoubleComplex*>(dA), ldda,
reinterpret_cast<magmaDoubleComplex*>(dB), lddb,
reinterpret_cast<magmaDoubleComplex*>(hwork), lwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
namespace {
/*
MAGMA can return errors both as a return value and in the info argument.
The return value and info should always be identical.
In general, the meaning is as given in this table.
Predefined error codes are large negative numbers. Using the symbolic
constants below is preferred, but the numeric values can be found in
include/magma_types.h.
Info | Description
----------- | -----------
info = 0 (MAGMA_SUCCESS) | Successful exit
info < 0, but small | For info = -i, the i-th argument had an illegal value
info > 0 | Function-specific error such as singular matrix
MAGMA_ERR_DEVICE_ALLOC | Could not allocate GPU device memory
MAGMA_ERR_HOST_ALLOC | Could not allocate CPU host memory
MAGMA_ERR_ILLEGAL_VALUE | An argument had an illegal value (deprecated; instead it should return -i to say the i-th argument was bad)
MAGMA_ERR_INVALID_PTR | Can't free pointer
MAGMA_ERR_NOT_IMPLEMENTED | Function or option not implemented
MAGMA_ERR_NOT_SUPPORTED | Function or option not supported on the current architecture
*/
void checkMagmaInternalError(magma_int_t info, const std::string& magma_function_name) {
// if info > 0 the error is function-specific, do nothing in this case
TORCH_CHECK(info >= 0,
"MAGMA error: ",
magma_strerror(info),
", info = ", info,
", when calling ", magma_function_name);
}
} // anonymous namespace
#endif // USE_MAGMA
#define ALLOCATE_ARRAY(name, type, size) \
auto storage_##name = pin_memory<type>(size); \
name = static_cast<type*>(storage_##name.data());
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_solve(Tensor& b, Tensor& A, Tensor& infos_out) {
#ifndef USE_MAGMA
AT_ERROR("solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto A_data = A.data_ptr<scalar_t>();
auto b_data = b.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
magma_int_t lda = std::max(magma_int_t{1}, n);
if (b.dim() == 2) {
auto ipiv = at::empty({n}, at::kInt);
// magmaSolve requires infos tensor to live on CPU
Tensor infos = at::empty(infos_out.sizes(), infos_out.options().device(kCPU));
magmaSolve<scalar_t>(n, nrhs, A_data, lda, ipiv.data_ptr<magma_int_t>(),
b_data, lda, infos.data_ptr<magma_int_t>());
infos_out.copy_(infos);
} else {
auto infos_data = infos_out.data_ptr<magma_int_t>();
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
magma_int_t* ipiv_data;
magma_int_t** ipiv_array;
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n);
ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size);
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
ipiv_array[i] = &ipiv_data[i * n];
}
MAGMAQueue magma_queue(b.get_device());
constexpr int64_t batch_limit = 65535;
// Compute as many batches of 65535 possible
// The number of "mini"-batches are floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** A_array_cur = &A_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magma_int_t** ipiv_array_cur = &ipiv_array[mini_idx];
magma_int_t* info_array_cur = &infos_data[mini_idx];
magmaSolveBatched<scalar_t>(
n, nrhs, A_array_cur, lda, ipiv_array_cur, b_array_cur, lda,
info_array_cur, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaSolveBatched<scalar_t>(
n, nrhs, &A_array[mini_idx], lda, &ipiv_array[mini_idx], &b_array[mini_idx], lda,
&infos_data[mini_idx], batch_size % batch_limit, magma_queue);
}
}
#endif
}
std::tuple<Tensor, Tensor> _solve_helper_cuda(const Tensor& self, const Tensor& A) {
auto self_working_copy = cloneBatchedColumnMajor(self);
auto A_working_copy = cloneBatchedColumnMajor(A);
// infos might not get filled for empty inputs therefore at::zeros is used instead of at::empty
auto infos = at::zeros({std::max<int64_t>(1, batchCount(self))}, self.options().dtype(kInt));
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "solve_cuda", [&]{
apply_solve<scalar_t>(self_working_copy, A_working_copy, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "solve_cuda");
} else {
singleCheckErrors(infos.item().toInt(), "solve_cuda");
}
return std::tuple<Tensor, Tensor>(self_working_copy, A_working_copy);
}
// This is a type dispatching helper function for 'apply_solve'
Tensor& _linalg_solve_out_helper_cuda(Tensor& result, Tensor& input, Tensor& infos) {
// 'result' and 'input' should be in column major order (it should be checked before calling this function)
// the content of 'result', 'input' and 'infos' is overwritten by 'apply_solve'
// 'result' should contain data of 'other' tensor (right-hand-side of the linear system of equations)
// 'input' should contain data of origianl 'input' tensor (left-hand-side of the linear system)
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "linalg_solve_out_cpu", [&]{
apply_solve<scalar_t>(result, input, infos);
});
return result;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ inverse ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/*
Computes the inverse of n-by-n matrix 'self', it is saved to 'self_inv'.
'infos' is an int Tensor containing error codes for each matrix in the batched input.
'infos_lu' is for holding magmaLU errors, and 'infos_getri' is for holding magmaGetri errors
For more information see MAGMA's documentation for GETRI and GETRF routines.
*/
template <typename scalar_t>
static void apply_batched_inverse(Tensor& self, Tensor& self_inv, Tensor& infos_lu, Tensor& infos_getri) {
#ifndef USE_MAGMA
AT_ERROR("inverse: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data_ptr<scalar_t>();
auto self_mat_stride = matrixStride(self);
auto self_inv_data = self_inv.data_ptr<scalar_t>();
auto self_inv_mat_stride = matrixStride(self_inv);
auto infos_lu_data = infos_lu.data_ptr<magma_int_t>();
auto infos_getri_data = infos_getri.data_ptr<magma_int_t>();
magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
// MAGMA does not work with batch_size == 0, let's return early in this case
if (batch_size == 0) {
return;
}
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
magma_int_t lda = std::max<magma_int_t>(1, n);
magma_int_t* ipiv_data;
magma_int_t** ipiv_array;
scalar_t** self_array;
scalar_t** self_inv_array;
ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * lda);
ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size);
ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(self_inv_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
self_array[i] = &self_data[i * self_mat_stride];
self_inv_array[i] = &self_inv_data[i * self_inv_mat_stride];
ipiv_array[i] = &ipiv_data[i * n];
}
// magmaLuBatched leaves ipiv_data values unwritten for singular matrices.
// Initialize to avoid memory access violations inside magma kernels (gh-51930).
std::fill_n(ipiv_data, batch_size * n, 1);
MAGMAQueue magma_queue(self.get_device());
magmaLuBatched<scalar_t>(
n, n, self_array, lda, ipiv_array, infos_lu_data,
batch_size, magma_queue);
constexpr int64_t batch_limit = 65535;
// Compute as many batches of 65535 possible
// The number of "mini"-batches are floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** self_array_cur = &self_array[mini_idx];
scalar_t** self_inv_array_cur = &self_inv_array[mini_idx];
magma_int_t** ipiv_array_cur = &ipiv_array[mini_idx];
magma_int_t* info_array_cur_getri = &infos_getri_data[mini_idx];
magmaGetriBatched<scalar_t>(
n, self_array_cur, lda, ipiv_array_cur, self_inv_array_cur,
lda, info_array_cur_getri, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaGetriBatched<scalar_t>(
n, &self_array[mini_idx], lda, &ipiv_array[mini_idx], &self_inv_array[mini_idx],
lda, &infos_getri_data[mini_idx], batch_size % batch_limit, magma_queue);
}
#endif
}
template <typename scalar_t>
static void apply_single_inverse(Tensor& self, Tensor& infos_lu, Tensor& infos_getri) {
#ifndef USE_MAGMA
AT_ERROR("inverse: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
magma_int_t lda = std::max<magma_int_t>(1, n);
magma_int_t lwork = n * magmaGetriOptimalBlocksize<scalar_t>(n);
// magmaLu and magmaGetri requires infos tensor to live on CPU
infos_lu = infos_lu.to(at::kCPU);
infos_getri = infos_getri.to(at::kCPU);
Tensor ipiv = at::empty({lda}, at::kInt);
Tensor dwork = at::empty({lwork}, self.options());
magmaLu<scalar_t>(n, n, self_data, lda, ipiv.data_ptr<magma_int_t>(), infos_lu.data_ptr<magma_int_t>());
magmaGetri<scalar_t>(
n, self_data, lda, ipiv.data_ptr<magma_int_t>(), dwork.data_ptr<scalar_t>(), lwork, infos_getri.data_ptr<magma_int_t>());
#endif
}
Tensor _inverse_helper_cuda_legacy(const Tensor& self) {
auto self_inv_working_copy = cloneBatchedColumnMajor(self);
if (self.dim() > 2) {
auto infos_lu = at::zeros({std::max<int64_t>(1, batchCount(self))}, self.options().dtype(kInt));
auto infos_getri = at::zeros({std::max<int64_t>(1, batchCount(self))}, self.options().dtype(kInt));
auto self_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "inverse_cuda", [&]{
apply_batched_inverse<scalar_t>(
self_working_copy, self_inv_working_copy, infos_lu, infos_getri);
});
batchCheckErrors(infos_lu, "inverse_cuda");
batchCheckErrors(infos_getri, "inverse_cuda");
} else {
// magmaLu and magmaGetri requires infos tensor to live on CPU
auto infos_lu = at::zeros({1}, self.options().dtype(kInt).device(kCPU));
auto infos_getri = at::zeros({1}, self.options().dtype(kInt).device(kCPU));
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "inverse_cuda", [&]{
apply_single_inverse<scalar_t>(self_inv_working_copy, infos_lu, infos_getri);
});
singleCheckErrors(infos_lu.item().toInt(), "inverse_cuda");
singleCheckErrors(infos_getri.item().toInt(), "inverse_cuda");
}
return self_inv_working_copy;
}
Tensor _inverse_helper_cuda(const Tensor& self) {
#ifdef USE_CUSOLVER
if ((self.dim() == 2) || (/* self.dim() > 2 && */ batchCount(self) <= 2) || !use_magma_) {
return _inverse_helper_cuda_lib(self); // cusolver or cublas
} else {
return _inverse_helper_cuda_legacy(self); // magma-cuda
}
#else
return _inverse_helper_cuda_legacy(self); // magma-cuda
#endif
}
// This is a type dispatching helper function for 'apply_batched_inverse' and 'singleCheckErrors'
Tensor& _linalg_inv_out_helper_cuda_legacy(Tensor& result, Tensor& infos_lu, Tensor& infos_getri) {
// assuming result is in column major order and contains the matrices to invert
if (result.dim() > 2) {
auto input_working_copy = cloneBatchedColumnMajor(result);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "linalg_inv_out_cuda", [&]{
apply_batched_inverse<scalar_t>(
input_working_copy, result, infos_lu, infos_getri);
});
} else {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "linalg_inv_out_cuda", [&]{
apply_single_inverse<scalar_t>(result, infos_lu, infos_getri);
});
}
return result;
}
// This is a MAGMA/cuSOLVER dispatching helper function
Tensor& _linalg_inv_out_helper_cuda(Tensor &result, Tensor& infos_lu, Tensor& infos_getri) {
// This function calculates the inverse matrix in-place
// result should be in column major order and contain matrices to invert
#ifdef USE_CUSOLVER
if ((result.dim() == 2) || (/* result.dim() > 2 && */ batchCount(result) <= 2) || !use_magma_) {
return _linalg_inv_out_helper_cuda_lib(result, infos_lu, infos_getri); // cusolver or cublas
} else {
return _linalg_inv_out_helper_cuda_legacy(result, infos_lu, infos_getri); // magma-cuda
}
#else
return _linalg_inv_out_helper_cuda_legacy(result, infos_lu, infos_getri); // magma-cuda
#endif
return result;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_cholesky_solve(Tensor& b, Tensor& A, bool upper, int64_t& info) {
#ifndef USE_MAGMA
AT_ERROR("cholesky_solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
auto A_data = A.data_ptr<scalar_t>();
auto b_data = b.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t lda = std::max<magma_int_t>(1, n);
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
int info_tmp = 0;
if (b.dim() == 2) {
magmaCholeskySolve<scalar_t>(uplo, n, nrhs, A_data, lda,
b_data, lda, &info_tmp);
info = info_tmp;
} else {
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
}
MAGMAQueue magma_queue(b.get_device());
constexpr int64_t batch_limit = 65535;
// Compute as many batches of 65535 possible
// The number of "mini"-batches are floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** A_array_cur = &A_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magmaCholeskySolveBatched<scalar_t>(
uplo, n, nrhs, A_array_cur, lda, b_array_cur, lda,
info_tmp, batch_limit, magma_queue);
if (info_tmp != 0) {
break;
}
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0 && info_tmp == 0) {
magmaCholeskySolveBatched<scalar_t>(
uplo, n, nrhs, &A_array[mini_idx], lda, &b_array[mini_idx], lda,
info_tmp, batch_size % batch_limit, magma_queue);
}
info = info_tmp;
}
#endif
}
Tensor _cholesky_solve_helper_cuda_magma(const Tensor& self, const Tensor& A, bool upper) {
int64_t info = 0;
auto self_working_copy = cloneBatchedColumnMajor(self);
auto A_working_copy = cloneBatchedColumnMajor(A);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "cholesky_solve_cuda", [&]{
apply_cholesky_solve<scalar_t>(self_working_copy, A_working_copy, upper, info);
});
TORCH_CHECK(info == 0, "MAGMA cholesky_solve : invalid argument: ", -info);
return self_working_copy;
}
// Todo: cusolverDn<T>potrsBatched only supports nrhs == 1 and does not have good performance.
// Batched cholesky_solve is dispatched to magma.
Tensor _cholesky_solve_helper_cuda(const Tensor& self, const Tensor& A, bool upper) {
#ifdef USE_CUSOLVER
if (batchCount(self) == 1 || !use_magma_) {
return _cholesky_solve_helper_cuda_cusolver(self, A, upper);
} else {
return _cholesky_solve_helper_cuda_magma(self, A, upper);
}
#else
return _cholesky_solve_helper_cuda_magma(self, A, upper);
#endif
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_cholesky(Tensor& self, bool upper, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("cholesky: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
auto self_data = self.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
auto lda = std::max<magma_int_t>(1, n);
if (self.dim() == 2) {
magma_int_t info = 0;
magmaCholesky<scalar_t>(uplo, n, self_data, lda, &info);
infos[0] = info;
} else {
auto self_mat_stride = matrixStride(self);
magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
magma_int_t* info_array;
scalar_t** self_array;
ALLOCATE_ARRAY(info_array, magma_int_t, batch_size);
ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
self_array[i] = &self_data[i * self_mat_stride];
}
MAGMAQueue magma_queue(self.get_device());
int64_t batch_limit = self.is_complex() ? 65535 : 262140;
// Compute as many batches of 262140 possible
// 262140 is the size of the largest batch of matrices that can be run with
// violating maximum kernel configuration
// For complex input the batch limit is 65535 (determined experimentally, see https://github.com/pytorch/pytorch/pull/47047#discussion_r516086923 for more information)
// The number of "mini"-batches are floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit cholesky calls
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** self_array_cur = &self_array[mini_idx];
magma_int_t* info_array_cur = &info_array[mini_idx];
magmaCholeskyBatched<scalar_t>(
uplo, n, self_array_cur, lda, info_array_cur, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaCholeskyBatched<scalar_t>(
uplo, n, &self_array[mini_idx], lda, &info_array[mini_idx], batch_size % batch_limit, magma_queue);
}
for (int64_t i = 0; i < batch_size; i++) {
infos[i] = info_array[i];
}
}
#endif
}
Tensor _cholesky_helper_cuda_magma(const Tensor& self, bool upper) {
std::vector<int64_t> infos(batchCount(self), 0);
Tensor result;
if (self.dim() > 2) {
// MAGMA's batched cholesky operator has an off-by-one error causing IMA
// (see https://github.com/pytorch/pytorch/issues/42666). This code is based
// on the #cloneBatchedColumnMajor function however it pads the input with
// one extra element utilizing the fact that the resize_as_ method preserves
// the storage even if it's larger than the new sizes. This way if MAGMA
// reads off bounds it will still be valid user memory.
const Tensor input = upper ? self : self.transpose(-1, -2);
result = at::empty(input.numel() + 1, input.options());
result.resize_as_(input).copy_(input).transpose_(-1, -2);
} else {
result = cloneBatchedColumnMajor(upper ? self.transpose(-1, -2) : self);
}
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(
self.scalar_type(), "cholesky_cuda", [&] {
apply_cholesky<scalar_t>(result, false, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "cholesky_cuda");
} else {
singleCheckErrors(infos[0], "cholesky_cuda");
}
return upper ? result.transpose_(-1, -2) : result;
}
// Todo: cusolverDnXpotrfBatched has some numerical issue and is not used
// here. Batched cholesky is dispatched to magma.
// We will switch to cusolverDnXpotrfBatched after the issue is fixed.
// See https://github.com/pytorch/pytorch/issues/53879.
Tensor _cholesky_helper_cuda(const Tensor& self, bool upper) {
#ifdef USE_CUSOLVER
if (batchCount(self) == 1 || !use_magma_) {
return _cholesky_helper_cuda_cusolver(self, upper);
}
else {
return _cholesky_helper_cuda_magma(self, upper);
}
#else
return _cholesky_helper_cuda_magma(self, upper);
#endif
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky_inverse ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/*
Computes the inverse of a symmetric (Hermitian) positive-definite matrix n-by-n matrix 'input' using the Cholesky solver
This is an in-place routine, content of 'input' is overwritten.
'infos' is an int Tensor containing error codes for each matrix in the batched input.
MAGMA requires 'infos' to reside in CPU memory.
For more information see MAGMA's documentation for POTRS routine.
*/
template <typename scalar_t>
static void apply_cholesky_inverse(Tensor& input, Tensor& infos, bool upper) {
#ifndef USE_MAGMA
TORCH_CHECK(false, "cholesky_inverse: MAGMA library not found in compilation. Please rebuild with MAGMA.");
#else
// magmaCholeskyInverse (magma_dpotri_gpu) is slow because internally
// it transfers data several times between GPU and CPU and calls lapack routine on CPU
// using magmaCholeskySolveBatched is a lot faster
// note that magmaCholeskySolve is also slow
// 'input' is modified in-place we need to clone it and replace with a diagonal matrix
// for apply_cholesky_solve
auto input_working_copy = cloneBatchedColumnMajor(input);
// 'input' tensor has to be a batch of diagonal matrix
input.fill_(0);
input.diagonal(/*offset=*/0, /*dim1=*/-2, /*dim2=*/-1).fill_(1);
Tensor result_u, input_u;
if (input.dim() == 2) {
// unsqueezing here so that the batched version is used
result_u = input.unsqueeze(0);
input_u = input_working_copy.unsqueeze(0);
} else {
result_u = input;
input_u = input_working_copy;
}
// magma's potrs_batched doesn't take matrix-wise array of ints as an 'info' argument
// it returns a single 'magma_int_t'
// if info = 0 the operation is successful, if info = -i, the i-th parameter had an illegal value.
int64_t info_tmp = 0;
apply_cholesky_solve<scalar_t>(result_u, input_u, upper, info_tmp);
infos.fill_(info_tmp);
#endif
}
// This is a type dispatching helper function for 'apply_cholesky_inverse'
Tensor& cholesky_inverse_kernel_impl_magma(Tensor &result, Tensor& infos, bool upper) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "cholesky_inverse_out_cuda", [&]{
apply_cholesky_inverse<scalar_t>(result, infos, upper);
});
return result;
}
Tensor& cholesky_inverse_kernel_impl(Tensor &result, Tensor& infos, bool upper) {
// This function calculates the inverse matrix in-place
// result should be in column major order and contain matrices to invert
// the content of result is overwritten by 'apply_cholesky_inverse'
#ifdef USE_CUSOLVER
if (batchCount(result) == 1 || !use_magma_) {
return cholesky_inverse_kernel_impl_cusolver(result, infos, upper);
} else {
return cholesky_inverse_kernel_impl_magma(result, infos, upper);
}
#else
return cholesky_inverse_kernel_impl_magma(result, infos, upper);
#endif
}
REGISTER_DISPATCH(cholesky_inverse_stub, &cholesky_inverse_kernel_impl);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lu ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_lu(Tensor& self, Tensor& pivots, Tensor& infos, bool get_pivots) {
#ifndef USE_MAGMA
AT_ERROR("lu: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data_ptr<scalar_t>();
magma_int_t m = magma_int_cast(self.size(-2), "m");
magma_int_t n = magma_int_cast(self.size(-1), "n");
magma_int_t k = std::min(m, n);
if (self.dim() == 2) {
// If `pivots` is defined, then we have to compute them.
// magmaLu and magmaLuNoPiv use a hybrid CPU-GPU algorithm to compute
// the partially-pivoted LU decomposition with / without pivots.
// The driver routines magma_(d/s)getrf_(nopiv_)gpu accepts a tensor on the CPU for pivots.
// The data is later copied back to the appropriate output tensor.
Tensor info_tmp = at::zeros({}, at::kInt);
if (get_pivots) {
Tensor piv_tmp = at::empty({k}, at::kInt);
magmaLu<scalar_t>(
m, n, self_data, m, piv_tmp.data_ptr<magma_int_t>(), info_tmp.data_ptr<magma_int_t>());
pivots.copy_(piv_tmp);
} else {
magmaLuNoPiv<scalar_t>(m, n, self_data, m, info_tmp.data_ptr<magma_int_t>());
}
infos.copy_(info_tmp);
} else {
auto self_matrix_stride = matrixStride(self);
magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
scalar_t** self_array;
ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
self_array[i] = &self_data[i * self_matrix_stride];
}
MAGMAQueue magma_queue(self.get_device());
// Same comment as in the case of single matrix above.
if (get_pivots) {
auto pivots_data = pivots.data_ptr<magma_int_t>();
auto pivots_matrix_stride = pivots.size(-1);
magma_int_t** pivots_array;
ALLOCATE_ARRAY(pivots_array, magma_int_t*, batch_size);
for (int64_t i = 0; i < batch_size; i++) {
pivots_array[i] = &pivots_data[i * pivots_matrix_stride];
}
magmaLuBatched<scalar_t>(
m, n, self_array, m, pivots_array,
infos.data_ptr<magma_int_t>(), batch_size, magma_queue);
} else {
magmaLuNoPivBatched<scalar_t>(
m, n, self_array, m, infos.data_ptr<magma_int_t>(),
batch_size, magma_queue);
}
}
#endif
}
std::tuple<Tensor, Tensor, Tensor> _lu_with_info_cuda(const Tensor& self, bool pivot, bool check_errors) {
TORCH_CHECK(self.dim() >= 2,
"expected tensor with 2 or more dimensions, got size: ", self.sizes(),
" instead");
auto m = self.size(-2);
auto n = self.size(-1);
auto k = std::min(m, n);
auto req_size = self.sizes().vec();
req_size.pop_back();
req_size.back() = k;
Tensor pivots_tensor = at::arange(1, k + 1, self.options().dtype(at::kInt)).expand(req_size).contiguous();
req_size.pop_back();
auto infos_tensor = at::zeros(req_size, self.options().dtype(at::kInt));
Tensor self_working_copy;
if (self.numel() == 0) {
self_working_copy = at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
} else {
self_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "lu_cuda", [&]{
apply_lu<scalar_t>(self_working_copy, pivots_tensor, infos_tensor, pivot);
});
}
if (check_errors) {
if (self.dim() == 2) {
singleCheckErrors(infos_tensor.item<int64_t>(), "lu", /*allow_singular=*/true);
} else {
batchCheckErrors(infos_tensor, "lu", /*allow_singular=*/true);
}
}
return std::make_tuple(self_working_copy, pivots_tensor, infos_tensor);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ triangular_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_triangular_solve_batched(Tensor& A, Tensor& b, bool upper, bool transpose, bool conjugate_transpose, bool unitriangular) {
#ifndef USE_MAGMA
AT_ERROR("triangular_solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
magma_trans_t trans = transpose ? MagmaTrans : MagmaNoTrans;
trans = conjugate_transpose ? MagmaConjTrans : trans;
magma_diag_t diag = unitriangular ? MagmaUnit : MagmaNonUnit;
auto A_data = A.data_ptr<scalar_t>();
auto b_data = b.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
// magma returns early if m <= 0 || n <= 0 for magmaTriangularSolveBatched
// magmaTriangularSolve is calling cuBLAS and it prints
// ** On entry to DTRSM parameter number 9 had an illegal value
// so let's use proper lda parameter here
magma_int_t lda = std::max<magma_int_t>(1, n);
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
}
MAGMAQueue magma_queue(b.get_device());
constexpr int64_t batch_limit = 65535;
// Compute as many batches of 65535 possible
// The number of "mini"-batches are floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
int64_t mini_batches = batch_size / batch_limit;
int64_t mini_idx; // this is outside the loop because it is used for the case batch_size % batch_limit != 0
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** A_array_cur = &A_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magmaTriangularSolveBatched<scalar_t>(
uplo, trans, diag, n, nrhs, A_array_cur,
lda, b_array_cur, lda, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaTriangularSolveBatched<scalar_t>(
uplo, trans, diag, n, nrhs, &A_array[mini_idx],
lda, &b_array[mini_idx], lda, batch_size % batch_limit, magma_queue);
}
#endif
}
void triangular_solve_batched_magma(Tensor& A, Tensor& B, Tensor& infos, bool upper, bool transpose, bool conjugate_transpose, bool unitriangular) {
(void)infos; // unused
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(A.scalar_type(), "triangular_solve_cuda", [&]{
apply_triangular_solve_batched<scalar_t>(A, B, upper, transpose, conjugate_transpose, unitriangular);
});
}
void triangular_solve_kernel(Tensor& A, Tensor& B, Tensor& infos, bool upper, bool transpose, bool conjugate_transpose, bool unitriangular) {
// For batches smaller than 8 and matrix sizes larger than 64x64 cuBLAS forloop is faster than batched version
if (batchCount(A) <= 8 && A.size(-1) >= 64) {
triangular_solve_cublas(A, B, infos, upper, transpose, conjugate_transpose, unitriangular);
} else {
#ifndef USE_MAGMA
triangular_solve_batched_cublas(A, B, infos, upper, transpose, conjugate_transpose, unitriangular);
#else
// cuBLAS batched is faster than MAGMA batched up until 512x512, after that MAGMA is faster
if (A.size(-1) <= 512) {
triangular_solve_batched_cublas(A, B, infos, upper, transpose, conjugate_transpose, unitriangular);
} else {
triangular_solve_batched_magma(A, B, infos, upper, transpose, conjugate_transpose, unitriangular);
}
#endif // USE_MAGMA
}
}
REGISTER_DISPATCH(triangular_solve_stub, &triangular_solve_kernel);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ orgqr ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tensor& orgqr_kernel_impl(Tensor& result, const Tensor& tau) {
// TODO: It is possible to implement efficient batched orgqr for small tau (tau.size(-1) <= 32)
// using MAGMA, however it fails on Windows because of some illegal memory reads inside MAGMA.
// See discussions in https://github.com/pytorch/pytorch/pull/51348 for comparison of cuSOLVER-MAGMA
// and Windows failure.
// For reference here is the MAGMA-based implementation: https://gist.github.com/IvanYashchuk/2db50002c9d3c1462ff769e6410ad983
#if defined(USE_CUSOLVER)
return orgqr_helper_cusolver(result, tau); // cusolver
#else
TORCH_CHECK(false, "Calling torch.orgqr on a CUDA tensor requires compiling ",
"PyTorch with cuSOLVER. Please use PyTorch built with cuSOLVER support.");
#endif
}
REGISTER_DISPATCH(orgqr_stub, &orgqr_kernel_impl);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ qr ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_geqrf(const Tensor& input, const Tensor& tau) {
#ifndef USE_MAGMA
TORCH_CHECK(
false,
"Calling torch.geqrf on a CUDA tensor requires compiling ",
"PyTorch with MAGMA. Please use PyTorch built with MAGMA support.");
#else
magma_int_t m = magma_int_cast(input.size(-2), "m");
magma_int_t n = magma_int_cast(input.size(-1), "n");
auto input_data = input.data_ptr<scalar_t>();
auto input_matrix_stride = matrixStride(input);
auto tau_stride = tau.size(-1);
auto batch_size = batchCount(input);
auto lda = std::max<int>(1, m);
// magmaGeqrf uses a hybrid CPU-GPU algorithm to compute the elementary reflectors.
// The driver routine geqrf2_gpu accepts a tensor on the CPU for elementary reflectors.
Tensor tau_cpu = at::empty(tau.sizes(), tau.options().device(at::kCPU).pinned_memory(true));
scalar_t* tau_data = tau_cpu.data_ptr<scalar_t>();
scalar_t* work_data = nullptr; // workspace is not needed for geqrf2_gpu
magma_int_t info = 0;
for (int64_t i = 0; i < batch_size; i++) {
scalar_t* input_working_ptr = &input_data[i * input_matrix_stride];
scalar_t* tau_working_ptr = &tau_data[i * tau_stride];
// now compute the actual QR and tau
// MAGMA's geqrf2_gpu function is used, this version has LAPACK-complaint arguments.
magmaGeqrf<scalar_t>(m, n, input_working_ptr, lda, tau_working_ptr, work_data, &info, /*is_v2=*/true);
checkMagmaInternalError(info, "geqrf");
}
tau.copy_(tau_cpu, /*non_blocking=*/true);
#endif
}
// This is a type dispatching helper function for 'apply_geqrf'
void geqrf_magma(const Tensor& input, const Tensor& tau) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(input.scalar_type(), "geqrf_magma", [&]{
apply_geqrf<scalar_t>(input, tau);
});
}
// This is a backend library dispatching helper function for calling looped batch implementation
void geqrf_looped(const Tensor& input, const Tensor& tau) {
#if defined(USE_CUSOLVER)
return geqrf_cusolver(input, tau);
#else
return geqrf_magma(input, tau);
#endif
}
// This is a backend library dispatching helper function for calling specialized batched implementation
void geqrf_batched(const Tensor& input, const Tensor& tau) {
#ifdef CUDART_VERSION
// if cuBLAS is available
return geqrf_batched_cublas(input, tau);
#else
// TODO: implement MAGMA-based path using magma_zgeqrf_expert_batched
return geqrf_looped(input, tau);
#endif
}
void geqrf_kernel(const Tensor& input, const Tensor& tau) {
// if number of rows is smaller than 32 batched is always faster for batch size > 1
// for larger number of rows number of batches condition
if (input.size(-2) <= 256 && batchCount(input) >= std::max<int64_t>(2, input.size(-2) / 16)) {
return geqrf_batched(input, tau);
} else {
return geqrf_looped(input, tau);
}
}
REGISTER_DISPATCH(geqrf_stub, &geqrf_kernel);
template <typename scalar_t>
static void apply_qr(Tensor& Q, Tensor& R, int64_t q_size_minus_2, int64_t r_size_minus_1, int64_t n_columns,
bool compute_q) {
#ifndef USE_MAGMA
AT_ERROR("qr: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_int_t m = magma_int_cast(q_size_minus_2, "Q.size(-2)");
magma_int_t n = magma_int_cast(r_size_minus_1, "R.size(-1)");
auto r_data = R.data_ptr<scalar_t>();
auto r_matrix_stride = matrixStride(R);
magma_int_t k = m < n ? m : n;
magma_int_t nb = magmaGeqrfOptimalBlocksize<scalar_t>(m, n);
int64_t batch_size = batchCount(R);
// magmaGeqrf uses a hybrid CPU-GPU algorithm to compute the elementary reflectors.
// The driver routine magma_(d/s)geqrf2_gpu accepts a tensor on the CPU for elementary reflectors.
Tensor tau = at::empty({k}, Q.options().device(at::kCPU));
Tensor work = at::empty({(2 * k + magma_roundup(n, 32)) * nb}, R.options());
scalar_t* tau_data = tau.data_ptr<scalar_t>();
scalar_t* work_data = work.data_ptr<scalar_t>();
// This phase computes R (the raw version)
// This uses MAGMA's ?geqrf2_gpu function
magma_int_t info = 0;
for (int64_t i = 0; i < batch_size; i++) {
scalar_t* r_working_ptr = &r_data[i * r_matrix_stride];
magmaGeqrf<scalar_t>(m, n, r_working_ptr, m, tau_data, work_data, &info, /*is_v2=*/true);
checkMagmaInternalError(info, "geqrf");
}
if (!compute_q) {
// this is for mode='r'
return;
}
// This phase computes Q (the raw version)
// We require to perform ?geqrf_gpu again due to this bug in MAGMA:
// - ?geqrf_gpu allows fast computation of Q via ?orgqr_gpu, but doesn't give R properly.
// - ?geqrf2_gpu gives correct R, but doesn't allow computation of Q via ?orgqr_gpu
// Refer to the below link for more details:
// http://icl.cs.utk.edu/magma/forum/viewtopic.php?f=2&t=1015&p=2800&hilit=geqrf_gpu#p2800
auto q_data = Q.data_ptr<scalar_t>();
auto q_matrix_stride = matrixStride(Q);
for (int64_t i = 0; i < batch_size; i++) {
scalar_t* q_working_ptr = &q_data[i * q_matrix_stride];
magmaGeqrf<scalar_t>(m, n, q_working_ptr, m, tau_data, work_data, &info, /*is_v2=*/false);
checkMagmaInternalError(info, "geqrf");
magmaOrgqr<scalar_t>(m, n_columns, k, q_working_ptr, m, tau_data, work_data, nb, &info);
checkMagmaInternalError(info, "orgqr");
}
#endif
}
std::tuple<Tensor, Tensor> linalg_qr_helper_magma(const Tensor& self, std::string mode) {
bool compute_q, reduced;
std::tie(compute_q, reduced) = _parse_qr_mode(mode);
// Setup input geometry and inputs for apply_qr
std::vector<int64_t> q_sizes, q_strides;
int64_t n_columns_q;
std::tie(q_sizes, q_strides, n_columns_q) = _compute_geometry_for_Q(self, reduced);
Tensor q_working_copy, r_working_copy;
// If there are no elements, then we simply return a pair of tensors of required dimensions
if (self.numel() == 0) {
int64_t n = self.size(-1);
auto r_shape = self.sizes().vec();
r_shape.end()[-2] = n_columns_q;
r_shape.end()[-1] = n;
r_working_copy = at::empty(r_shape, self.options());
if (compute_q) {
auto q_shape = q_sizes;
q_shape.end()[-1] = n_columns_q;
q_working_copy = at::zeros(q_shape, self.options());
q_working_copy.diagonal(/*offset=*/0, /*dim1=*/-2, /*dim2=*/-1).fill_(1);
} else {
q_working_copy = at::empty({0}, self.options());
}
return std::make_tuple(q_working_copy, r_working_copy);
}
if (compute_q) {
q_working_copy = at::empty_strided(q_sizes, q_strides, self.options());
q_working_copy.narrow(-1, 0, self.size(-1)).copy_(self);
} else {
q_working_copy = at::empty({0}, self.options());
}
r_working_copy = cloneBatchedColumnMajor(self);
int64_t m = q_sizes[self.dim() - 2];
int64_t n = r_working_copy.size(-1);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "qr_cuda", [&]{
apply_qr<scalar_t>(q_working_copy, r_working_copy, m, n, n_columns_q, compute_q);
});
if (compute_q) {
q_working_copy = q_working_copy.narrow(-1, 0, n_columns_q);
}
r_working_copy = r_working_copy.narrow(-2, 0, n_columns_q).triu();
return std::make_tuple(q_working_copy, r_working_copy);
}
std::tuple<Tensor, Tensor> _linalg_qr_helper_cuda(const Tensor& input, std::string mode) {
#if defined(USE_CUSOLVER)
// _linalg_qr_helper_default is a generic function that is implemented using
// geqrf_stub and orgqr_stub. It dispatches to cuSOLVER for CUDA inputs if USE_CUSOLVER is defined
return _linalg_qr_helper_default(input, mode);
#else
return linalg_qr_helper_magma(input, mode);
#endif
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ symeig ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_magma_eigh(Tensor& values, Tensor& vectors, Tensor& infos, bool upper, bool compute_eigenvectors) {
#ifndef USE_MAGMA
TORCH_CHECK(
false,
"Calling torch.linalg.eigh/eigvalsh on a CUDA tensor requires compiling ",
"PyTorch with MAGMA. Please use PyTorch built with MAGMA support.");
#else
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(values.device() == kCPU);
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(infos.device() == kCPU);
using value_t = typename c10::scalar_value_type<scalar_t>::type;
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
magma_vec_t jobz = compute_eigenvectors ? MagmaVec : MagmaNoVec;
magma_int_t n = magma_int_cast(vectors.size(-1), "n");
auto lda = std::max<magma_int_t>(1, n);
auto batch_size = batchCount(vectors);
auto vectors_stride = matrixStride(vectors);
auto values_stride = values.size(-1);
auto vectors_data = vectors.data_ptr<scalar_t>();
auto values_data = values.data_ptr<value_t>();
auto infos_data = infos.data_ptr<magma_int_t>();
scalar_t* wA;
ALLOCATE_ARRAY(wA, scalar_t, lda * lda);
// Run once, first to get the optimum work sizes.
// Since we deal with batches of matrices with the same dimensions, doing this outside
// the loop saves (batch_size - 1) workspace queries which would provide the same result
// and (batch_size - 1) calls to allocate and deallocate workspace using at::empty()
magma_int_t lwork = -1;
scalar_t wkopt;
magma_int_t liwork = -1;
magma_int_t iwkopt;
magma_int_t lrwork = -1;
value_t rwkopt;
magmaSyevd<scalar_t, value_t>(jobz, uplo, n, vectors_data, lda, values_data,
wA, lda, &wkopt, lwork, &rwkopt, lrwork, &iwkopt, liwork, infos_data);
scalar_t* work;
magma_int_t* iwork;
lwork = magma_int_cast(std::max<int64_t>(1, real_impl<scalar_t, value_t>(wkopt)), "work_size");
liwork = magma_int_cast(std::max<int64_t>(1, iwkopt), "iwork_size");
ALLOCATE_ARRAY(work, scalar_t, lwork);
ALLOCATE_ARRAY(iwork, magma_int_t, liwork);
value_t* rwork = nullptr;
c10::Storage storage_rwork;
if (vectors.is_complex()) {
lrwork = magma_int_cast(std::max<int64_t>(1, rwkopt), "rwork_size");
storage_rwork = pin_memory<value_t>(lrwork);
rwork = static_cast<value_t*>(storage_rwork.data());
}
for (decltype(batch_size) i = 0; i < batch_size; i++) {
scalar_t* vectors_working_ptr = &vectors_data[i * vectors_stride];
value_t* values_working_ptr = &values_data[i * values_stride];
magma_int_t* info_working_ptr = &infos_data[i];
magmaSyevd<scalar_t, value_t>(jobz, uplo, n, vectors_working_ptr, lda, values_working_ptr,
wA, lda, work, lwork, rwork, lrwork, iwork, liwork, info_working_ptr);
// The current behaviour for Linear Algebra functions to raise an error if something goes wrong
// or input doesn't satisfy some requirement
// therefore return early since further computations will be wasted anyway
if (*info_working_ptr != 0) {
return;
}
}
#endif
}
std::tuple<Tensor, Tensor> _symeig_helper_cuda(const Tensor& self, bool eigenvectors, bool upper) {
Tensor infos = at::zeros({std::max<int64_t>(1, batchCount(self))}, self.options().dtype(kInt).device(at::kCPU));
auto eigvals_shape = IntArrayRef(self.sizes().data(), self.dim()-1); // self.shape[:-1]
ScalarType real_dtype = toValueType(self.scalar_type());
// magmaSyevd uses a hybrid CPU-GPU algorithm to compute the eigenvalues and eigenvectors.
// The driver routine magma_(d/s)syev_gpu accepts a tensor on the CPU for eigvalenvalues.
// The data is later moved to the appropriate device.
// In the case where self.numel() == 0, we just return an empty tensor of
// dimensions on the CUDA (to avoid the unnecessary "to(at::kCUDA)")
auto eigvals_working_copy = self.numel() == 0
? at::empty(eigvals_shape, self.options().dtype(real_dtype))
: at::empty(eigvals_shape, self.options().dtype(real_dtype).device(at::kCPU));
if (self.numel() == 0) {
return std::tuple<Tensor, Tensor>(eigvals_working_copy, at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT));
}
auto self_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "symeig_cuda", [&]{
apply_magma_eigh<scalar_t>(eigvals_working_copy, self_working_copy, infos, upper, eigenvectors);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "symeig_cuda");
} else {
singleCheckErrors(infos.item().toInt(), "symeig_cuda");
}
if (eigenvectors) {
return std::tuple<Tensor, Tensor>(eigvals_working_copy.to(self.device()), self_working_copy);
} else {
return std::tuple<Tensor, Tensor>(eigvals_working_copy.to(self.device()), at::empty({0}, self.options()));
}
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ linalg_eigh ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// This is a type dispatch function for 'apply_magma_eigh'
// For small inputs result is computed on CPU
void linalg_eigh_magma(Tensor& eigenvalues, Tensor& eigenvectors, Tensor& infos, bool upper, bool compute_eigenvectors) {
// MAGMA just calls LAPACK for eigenvectors.size(-1) <= 128
// See https://bitbucket.org/icl/magma/src/e6fdca447bd402693e8b0b950a898b6879bbcc41/src/zheevd_gpu.cpp?at=master#lines-258
// in addition lda is ignored breaking 0x0 inputs
if (eigenvectors.size(-1) > 128) {
// MAGMA requires eigenvalues and infos tensors to reside on CPU
Tensor eigenvalues_cpu = eigenvalues.to(kCPU);
infos = infos.to(kCPU);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(
eigenvectors.scalar_type(), "linalg_eigh_cpu", [&] {
apply_magma_eigh<scalar_t>(
eigenvalues_cpu, eigenvectors, infos, upper, compute_eigenvectors);
});
// Transfer computed by MAGMA results from CPU to GPU
eigenvalues.copy_(eigenvalues_cpu);
} else { // eigenvectors.size(-1) <= 128
// transfer to CPU, compute the result and copy back to GPU
// this is faster than going through MAGMA that does the same
Tensor eigenvalues_cpu = at::empty_like(eigenvalues, eigenvalues.options().device(kCPU));
if (compute_eigenvectors) {
Tensor eigenvectors_cpu = at::empty_like(eigenvectors, eigenvectors.options().device(kCPU));
at::linalg_eigh_out(eigenvalues_cpu, eigenvectors_cpu, eigenvectors.to(kCPU), upper ? "U" : "L");
eigenvectors.copy_(eigenvectors_cpu);
} else {
at::linalg_eigvalsh_out(eigenvalues_cpu, eigenvectors.to(kCPU), upper ? "U" : "L");
}
eigenvalues.copy_(eigenvalues_cpu);
}
}
void linalg_eigh_kernel(Tensor& eigenvalues, Tensor& eigenvectors, Tensor& infos, bool upper, bool compute_eigenvectors) {
#if defined(USE_CUSOLVER)
linalg_eigh_cusolver(eigenvalues, eigenvectors, infos, upper, compute_eigenvectors);
#else
linalg_eigh_magma(eigenvalues, eigenvectors, infos, upper, compute_eigenvectors);
#endif
}
REGISTER_DISPATCH(linalg_eigh_stub, &linalg_eigh_kernel);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ eig ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// magmaEig uses a hybrid CPU-GPU algorithm, which takes and return CPU
// memory. So, we accept a GPU tensor, copy it to CPU memory, and later copy
// the returned values from CPU to GPU. See also magmaSymeig, which uses a
// similar approach.
template <typename scalar_t>
static void apply_eig(const Tensor& self, bool eigenvectors, Tensor& out_eigvals, Tensor& out_eigvecs,
int64_t *info_ptr) {
#ifndef USE_MAGMA
TORCH_CHECK(false, "Calling torch.eig on a CUDA tensor requires compiling PyTorch with MAGMA. "
"Either transfer the tensor to the CPU before calling torch.eig or recompile with MAGMA.");
#else
TORCH_INTERNAL_ASSERT(self.device() == at::kCPU, "Internal error: apply_eig needs a CPU tensor");
using value_t = typename c10::scalar_value_type<scalar_t>::type;
magma_vec_t jobvr = eigenvectors ? MagmaVec : MagmaNoVec;
magma_int_t n = magma_int_cast(self.size(-1), "n");
auto self_data = self.data_ptr<scalar_t>();
auto out_eigvals_data = out_eigvals.data_ptr<scalar_t>();
scalar_t *wr = out_eigvals_data;
scalar_t *vr_data = NULL;
magma_int_t ldvr = 1;
if (jobvr == MagmaVec)
{
vr_data = out_eigvecs.data_ptr<scalar_t>();
ldvr = n;
}
value_t *rwork_data = nullptr;
if (isComplexType(at::typeMetaToScalarType(self.dtype()))) {
ALLOCATE_ARRAY(rwork_data, value_t, n*2);
}
if (n > 0) {
// call magmaEig once to get the optimal size of work_data
scalar_t wkopt;
magma_int_t info;
magmaEig<scalar_t, value_t>(MagmaNoVec, jobvr, n, self_data, n, wr, NULL, 1, vr_data, ldvr, &wkopt, -1, rwork_data, &info);
magma_int_t lwork = static_cast<magma_int_t>(real_impl<scalar_t, value_t>(wkopt));
// call it a 2nd time to to the actual work
scalar_t *work_data = nullptr;
ALLOCATE_ARRAY(work_data, scalar_t, lwork);
magmaEig<scalar_t, value_t>(MagmaNoVec, jobvr, n, self_data, n, wr, NULL, 1, vr_data, ldvr, work_data, lwork, rwork_data, &info);
*info_ptr = info;
}
#endif
}
/*
* Internal helper; like eig_cuda but:
* 1. assume that self is a square matrix of side "n"
* 2. return CPU tensors (because this is what magmaEig returns), which will be copied to GPU memory
* by the caller
*/
std::tuple<Tensor, Tensor> eig_kernel_impl(const Tensor& self, bool& eigenvectors) {
int64_t n = self.size(-1);
// copy self to pinned CPU memory
auto self_working_copy = at::empty_strided(
{n, n}, // square matrix
{1, n}, // column-ordered, as magmaEig expects
at::TensorOptions(at::kCPU).dtype(self.dtype()).pinned_memory(true));
self_working_copy.copy_(self);
// tensors holding the results. We use empty_strided to make them column-ordered
auto options = self.options().device(at::kCPU).memory_format(LEGACY_CONTIGUOUS_MEMORY_FORMAT);
Tensor out_eigvals;
if (isComplexType(at::typeMetaToScalarType(self.dtype()))) {
out_eigvals = at::empty({n}, options);
} else {
out_eigvals = at::empty_strided({n, 2}, {1, n}, options);
}
auto out_eigvecs = eigenvectors
? at::empty_strided({n, n}, {1, n}, options)
: Tensor();
int64_t info;
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "eig_cuda", [&]{
apply_eig<scalar_t>(self_working_copy, eigenvectors, out_eigvals, out_eigvecs, &info);
});
singleCheckErrors(info, "eig_cuda");
return std::tuple<Tensor, Tensor>(out_eigvals, out_eigvecs);
}
REGISTER_DISPATCH(eig_stub, &eig_kernel_impl);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ linalg_eig ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/*
Computes the eigenvalues and eigenvectors of n-by-n matrix 'input'.
This is an in-place routine, content of 'input', 'values', 'vectors' is overwritten.
'infos' is an int Tensor containing error codes for each matrix in the batched input.
For more information see MAGMA's documentation for GEEV routine.
*/
template <typename scalar_t>
void apply_linalg_eig(Tensor& values, Tensor& vectors, Tensor& input, Tensor& infos, bool compute_eigenvectors) {
#ifndef USE_MAGMA
TORCH_CHECK(false, "Calling torch.linalg.eig on a CUDA tensor requires compiling PyTorch with MAGMA. "
"Either transfer the tensor to the CPU before calling torch.linalg.eig or recompile with MAGMA.");
#else
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(input.device() == at::kCPU);
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(values.device() == at::kCPU);
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(infos.device() == at::kCPU);
if (compute_eigenvectors) {
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(vectors.device() == at::kCPU);
}
using value_t = typename c10::scalar_value_type<scalar_t>::type;
magma_vec_t jobvr = compute_eigenvectors ? MagmaVec : MagmaNoVec;
magma_vec_t jobvl = MagmaNoVec; // only right eigenvectors are computed
magma_int_t n = magma_int_cast(input.size(-1), "n");
auto lda = std::max<magma_int_t>(1, n);
auto batch_size = batchCount(input);
auto input_matrix_stride = matrixStride(input);
auto values_stride = values.size(-1);
auto input_data = input.data_ptr<scalar_t>();
auto values_data = values.data_ptr<scalar_t>();
auto infos_data = infos.data_ptr<magma_int_t>();
auto rvectors_data = compute_eigenvectors ? vectors.data_ptr<scalar_t>() : nullptr;
scalar_t* lvectors_data = nullptr; // only right eigenvectors are computed
int64_t ldvr = compute_eigenvectors ? lda : 1;
int64_t ldvl = 1;
Tensor rwork;
value_t* rwork_data = nullptr;
if (input.is_complex()) {
ScalarType real_dtype = toValueType(input.scalar_type());
rwork = at::empty({lda * 2}, input.options().dtype(real_dtype));
rwork_data = rwork.data_ptr<value_t>();
}
// call magmaEig once to get the optimal size of work_data
scalar_t work_query;
magmaEig<scalar_t, value_t>(jobvl, jobvr, n, input_data, lda, values_data,
lvectors_data, ldvl, rvectors_data, ldvr, &work_query, -1, rwork_data, &infos_data[0]);
magma_int_t lwork = std::max<magma_int_t>(1, static_cast<magma_int_t>(real_impl<scalar_t, value_t>(work_query)));
Tensor work = at::empty({lwork}, input.dtype());
auto work_data = work.data_ptr<scalar_t>();
for (auto i = decltype(batch_size){0}; i < batch_size; i++) {
scalar_t* input_working_ptr = &input_data[i * input_matrix_stride];
scalar_t* values_working_ptr = &values_data[i * values_stride];
scalar_t* rvectors_working_ptr = compute_eigenvectors ? &rvectors_data[i * input_matrix_stride] : nullptr;
int* info_working_ptr = &infos_data[i];
magmaEig<scalar_t, value_t>(jobvl, jobvr, n, input_working_ptr, lda, values_working_ptr,
lvectors_data, ldvl, rvectors_working_ptr, ldvr, work_data, lwork, rwork_data, info_working_ptr);
}
#endif
}
// This is a type dispatching helper function for 'apply_linalg_eig'
void linalg_eig_kernel(Tensor& eigenvalues, Tensor& eigenvectors, Tensor& infos, const Tensor& input, bool compute_eigenvectors) {
// This function calculates the non-symmetric eigendecomposition in-place
// tensors should be in batched column major memory format
// the content of eigenvalues, eigenvectors and infos is overwritten by 'apply_linalg_eig'
// apply_linalg_eig modifies the provided input matrix in-place, therefore we need a copy
// MAGMA doesn't have GPU interface for the eigendecomposition and it forces us to transfer 'input' to CPU
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(input.is_cuda());
Tensor input_working_copy = at::empty(input.sizes(), input.options().device(kCPU));
input_working_copy.transpose_(-2, -1); // make input_working_copy to have Fortran contiguous memory layout
input_working_copy.copy_(input);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(input.scalar_type(), "linalg_eig_out_cuda", [&]{
apply_linalg_eig<scalar_t>(eigenvalues, eigenvectors, input_working_copy, infos, compute_eigenvectors);
});
}
REGISTER_DISPATCH(linalg_eig_stub, &linalg_eig_kernel);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ svd ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template<typename scalar_t>
static void apply_svd(Tensor& self, Tensor& U, Tensor& S, Tensor& VT,
char jobchar, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("svd: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
using value_t = typename c10::scalar_value_type<scalar_t>::type;
auto self_data = self.data_ptr<scalar_t>();
auto U_data = U.data_ptr<scalar_t>();
auto S_data = S.data_ptr<value_t>();
auto VT_data = VT.data_ptr<scalar_t>();
auto self_stride = matrixStride(self);
auto U_stride = matrixStride(U);
auto S_stride = S.size(-1);
auto VT_stride = matrixStride(VT);
auto batchsize = batchCount(self);
magma_vec_t jobz = jobchar == 'A' ? MagmaAllVec : (jobchar == 'S' ? MagmaSomeVec : MagmaNoVec);
magma_int_t m = magma_int_cast(self.size(-2), "m");
magma_int_t n = magma_int_cast(self.size(-1), "n");
auto lda = std::max<magma_int_t>(1, m);
auto ldvt = std::max<magma_int_t>(1, n);
auto mn = std::min(m, n);
c10::Storage storage_rwork;
value_t* rwork = nullptr;
magma_int_t* iwork;
ALLOCATE_ARRAY(iwork, magma_int_t, 8 * mn);
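// The 8 * min(m, n) integer workspace is consistent with the requirement of the divide-and-conquer
// (gesdd-style) SVD drivers that magmaSvd is expected to dispatch to; see the LAPACK ?gesdd docs.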
if (isComplexType(at::typeMetaToScalarType(self.dtype()))) {
auto lrwork = computeLRWorkDim(jobchar, m, n);
storage_rwork = pin_memory<value_t>(lrwork);
rwork = static_cast<value_t*>(storage_rwork.data());
}
magma_int_t info = 0;
// Run once, first to get the optimum work size.
// Since we deal with batches of matrices with the same dimensions, doing this outside
// the loop saves (batch_size - 1) workspace queries which would provide the same result
// and (batch_size - 1) calls to allocate and deallocate workspace using at::empty()
magma_int_t lwork = -1;
scalar_t wkopt = 1; // MAGMA might not set the value for the optimal workspace, therefore use 1 as the default value
magmaSvd<scalar_t, value_t>(jobz, m, n, self_data, lda, S_data, U_data, lda, VT_data, ldvt, &wkopt, lwork, rwork, iwork, &info);
lwork = magma_int_cast(real_impl<scalar_t, value_t>(wkopt), "work_size");
scalar_t* work;
ALLOCATE_ARRAY(work, scalar_t, lwork);
for (int64_t i = 0; i < batchsize; i++) {
scalar_t* self_working_ptr = &self_data[i * self_stride];
value_t* S_working_ptr = &S_data[i * S_stride];
scalar_t* U_working_ptr = &U_data[i * U_stride];
scalar_t* VT_working_ptr = &VT_data[i * VT_stride];
// Compute S, U (optionally), VT (optionally)
magmaSvd<scalar_t, value_t>(jobz, m, n, self_working_ptr, lda,
S_working_ptr, U_working_ptr, lda, VT_working_ptr, ldvt, work, lwork, rwork, iwork, &info);
infos[i] = info;
if (info != 0) {
return;
}
}
#endif
}
std::tuple<Tensor, Tensor, Tensor> _svd_helper_cuda_legacy(const Tensor& self, bool some, bool compute_uv) {
std::vector<int64_t> infos(batchCount(self), 0);
int64_t m = self.size(-2), n = self.size(-1);
int64_t k = std::min(m, n);
char jobchar = compute_uv ? (some ? 'S' : 'A') : 'N';
Tensor U_working_copy, S_working_copy, VT_working_copy;
std::tie(U_working_copy, S_working_copy, VT_working_copy) = _create_U_S_VT(self, some, compute_uv);
// The input matrix, U, S and VT have to reside in pinned memory.
// Additionally, the input and U have to be in column major format.
// _create_U_S_VT takes care of a part of these requirements (for U, S and VT)
// For the input matrix, these requirements are taken care of below.
// Specify strides
auto self_col_major_strides = at::detail::defaultStrides(self.sizes());
self_col_major_strides[self.dim() - 2] = 1;
self_col_major_strides[self.dim() - 1] = m;
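// Worked example (illustrative shapes): for a batch of shape (b, m, n) the default row-major
// strides are (m * n, n, 1); overwriting the last two entries gives (m * n, 1, m), i.e. each
// matrix in the batch is stored column-major while consecutive matrices stay contiguous.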
// Create strided tensor in pinned memory
auto self_working_copy = at::empty_strided(self.sizes(), self_col_major_strides,
at::TensorOptions(at::kCPU).dtype(self.dtype()).pinned_memory(true));
self_working_copy.copy_(self);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "svd_cuda", [&] {
apply_svd<scalar_t>(self_working_copy, U_working_copy, S_working_copy, VT_working_copy, jobchar, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "svd_cuda");
} else {
singleCheckErrors(infos[0], "svd_cuda");
}
U_working_copy = same_stride_to(U_working_copy, self.options());
S_working_copy = same_stride_to(S_working_copy, S_working_copy.options().device(self.device()));
VT_working_copy = same_stride_to(VT_working_copy, self.options());
if (!compute_uv) {
VT_working_copy.zero_();
U_working_copy.zero_();
}
if (some) {
VT_working_copy = VT_working_copy.narrow(-2, 0, k);
}
// so far we have computed VT, but torch.svd returns V instead. Adjust accordingly.
// Note that the 'apply_svd' routine returns VT = V^T (for real inputs) or VT = V^H (for complex inputs), not V.
VT_working_copy = VT_working_copy.conj();
VT_working_copy.transpose_(-2, -1);
return std::make_tuple(U_working_copy, S_working_copy, VT_working_copy);
}
std::tuple<Tensor, Tensor, Tensor> _svd_helper_cuda(const Tensor& self, bool some, bool compute_uv) {
#ifdef USE_CUSOLVER
return _svd_helper_cuda_lib(self, some, compute_uv);
#else
return _svd_helper_cuda_legacy(self, some, compute_uv);
#endif
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lu_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_lu_solve(Tensor& b, const Tensor& lu, const Tensor& pivots, int64_t& info) {
#ifndef USE_MAGMA
AT_ERROR("lu_solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto b_data = b.data_ptr<scalar_t>();
auto lu_data = lu.data_ptr<scalar_t>();
auto n = lu.size(-2);
auto nrhs = b.size(-1);
int info_tmp = 0;
if (b.dim() == 2) {
Tensor pivots_tmp = pivots.cpu();
magmaLuSolve<scalar_t>(n, nrhs, lu_data, n, pivots_tmp.data_ptr<magma_int_t>(), b_data, n, &info_tmp);
info = info_tmp;
} else {
auto pivots_data = pivots.data_ptr<magma_int_t>();
auto b_stride = matrixStride(b);
auto lu_stride = matrixStride(lu);
auto pivots_stride = pivots.size(-1);
magma_int_t batch_size = magma_int_cast(batchCount(b), "batchCount");
magma_int_t** pivots_array;
scalar_t** lu_array;
scalar_t** b_array;
ALLOCATE_ARRAY(pivots_array, magma_int_t*, batch_size);
ALLOCATE_ARRAY(lu_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
for (int64_t i = 0; i < batch_size; i++) {
pivots_array[i] = &pivots_data[i * pivots_stride];
b_array[i] = &b_data[i * b_stride];
lu_array[i] = &lu_data[i * lu_stride];
}
MAGMAQueue magma_queue(b.get_device());
constexpr int64_t batch_limit = 65535;
// Compute as many batches of batch_limit (65535) as possible
// The number of "mini"-batches is floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
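// For example (illustrative numbers), batch_size = 150000 gives mini_batches = 2, i.e. two full
// calls covering 131070 solves, and the tail call below handles the remaining
// 150000 % 65535 = 18930 systems.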
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** lu_array_cur = &lu_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magma_int_t** pivots_array_cur = &pivots_array[mini_idx];
magmaLuSolveBatched<scalar_t>(
n, nrhs, lu_array_cur, n, pivots_array_cur, b_array_cur, n,
info_tmp, batch_limit, magma_queue);
if (info_tmp != 0) {
break;
}
}
// Compute whatever is left over, i.e. batch_size - floor(batch_size / batch_limit) * batch_limit,
// which is simply batch_size % batch_limit
if (batch_size % batch_limit != 0 && info_tmp == 0) {
magmaLuSolveBatched<scalar_t>(
n, nrhs, &lu_array[mini_idx], n, &pivots_array[mini_idx], &b_array[mini_idx], n,
info_tmp, batch_size % batch_limit, magma_queue);
}
info = info_tmp;
}
#endif
}
Tensor _lu_solve_helper_cuda(const Tensor& self, const Tensor& LU_data, const Tensor& LU_pivots) {
int64_t info = 0;
auto self_working_copy = cloneBatchedColumnMajor(self);
auto LU_data_working_copy = cloneBatchedColumnMajor(LU_data);
auto LU_pivots_working_copy = LU_pivots.is_contiguous() ? LU_pivots : LU_pivots.contiguous();
if (self.numel() == 0 || LU_data.numel() == 0) {
return at::zeros_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
}
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "lu_solve_cuda", [&]{
apply_lu_solve<scalar_t>(self_working_copy, LU_data_working_copy, LU_pivots_working_copy, info);
});
TORCH_CHECK(info == 0, "MAGMA lu_solve : invalid argument: ", -info);
return self_working_copy;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lstsq ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_gels(const Tensor& a, Tensor& b, Tensor& infos) {
#ifndef USE_MAGMA
TORCH_CHECK(false, "torch.linalg.lstsq: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto trans = MagmaNoTrans;
auto m = magma_int_cast(a.size(-2), "m");
auto n = magma_int_cast(a.size(-1), "n");
TORCH_CHECK(
m >= n,
"torch.linalg.lstsq: only overdetermined systems (input.size(-2) >= input.size(-1)) are allowed on CUDA");
auto nrhs = magma_int_cast(b.size(-1), "nrhs");
auto ldda = std::max<magma_int_t>(1, m);
auto lddb = std::max<magma_int_t>(1, std::max(m, n));
auto nb = magmaGeqrfOptimalBlocksize<scalar_t>(m, n);
auto lwork = (m - n + nb) * (nrhs + nb) + nrhs * nb;
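// The host workspace is sized from MAGMA's optimal geqrf blocksize nb; this expression should
// match the minimum LWORK documented for the magma_?gels_gpu least-squares drivers (blocked QR
// of the m x n system plus the update/solve of the nrhs right-hand sides).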
Tensor hwork = at::empty({static_cast<int64_t>(lwork)}, a.scalar_type());
auto* hwork_ptr = hwork.data_ptr<scalar_t>();
// MAGMA requires infos tensor to live on CPU
infos = infos.to(at::kCPU);
auto infos_data = infos.data_ptr<magma_int_t>();
batch_iterator_with_broadcasting<scalar_t>(a, b,
[&](scalar_t* a_working_ptr, scalar_t* b_working_ptr,
int64_t a_linear_batch_idx) {
magma_int_t* infos_working_ptr = &infos_data[a_linear_batch_idx];
magmaGels<scalar_t>(trans, m, n, nrhs,
a_working_ptr, ldda, b_working_ptr, lddb,
hwork_ptr, lwork, infos_working_ptr);
}
);
#endif
}
void lstsq_kernel(const Tensor& a, Tensor& b, Tensor& rank, Tensor& singular_values, Tensor& infos, double rcond, std::string driver_name) {
(void)rank; // unused
(void)singular_values; // unused
(void)rcond; // unused
(void)driver_name; // unused
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(a.scalar_type(), "linalg_lstsq_cuda", [&] {
apply_gels<scalar_t>(a, b, infos);
});
}
REGISTER_DISPATCH(lstsq_stub, &lstsq_kernel);
}} // namespace at::native
#undef ALLOCATE_ARRAY
|
0c3844f40423e91fd1c268b13033696f0f2e2942.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <assert.h>
#include <sys/ipc.h>
#include <sys/shm.h>
/**< NVCC assumes that all header files are C++ files. Tell it that these are
* C header files. */
extern "C" {
#include "common.h"
}
hipStream_t myStream;
void printDeviceProperties()
{
struct hipDeviceProp_t deviceProp;
int ret = hipGetDeviceProperties(&deviceProp, 0);
CPE(ret != hipSuccess, "Get Device Properties failed\n", -1);
printf("\n=================DEVICE PROPERTIES=================\n");
printf("\tDevice name: %s\n", deviceProp.name);
printf("\tTotal global memory: %lu bytes\n", deviceProp.totalGlobalMem);
printf("\tWarp size: %d\n", deviceProp.warpSize);
printf("\tCompute capability: %d.%d\n", deviceProp.major, deviceProp.minor);
printf("\tMulti-processor count: %d\n", deviceProp.multiProcessorCount);
printf("\tThreads per multi-processor: %d\n", deviceProp.maxThreadsPerMultiProcessor);
printf("\n");
}
__global__ void
randMem(int *pkts, const int *log, int num_pkts)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < num_pkts) {
int j;
for(j = 0; j < DEPTH; j ++) {
pkts[i] = log[pkts[i]];
}
}
}
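/**< randMem above is a pointer-chasing benchmark: each thread performs DEPTH dependent loads
 * (pkts[i] = log[pkts[i]]), so every access depends on the previous one and hits a random
 * location in the log, stressing random-access memory latency rather than bandwidth. */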
double cpu_run(int *pkts, int *log, int num_pkts)
{
int i;
struct timespec start, end;
clock_gettime(CLOCK_REALTIME, &start);
for(i = 0; i < num_pkts; i += 8) {
int j;
for(j = 0; j < DEPTH; j ++) {
pkts[i] = log[pkts[i]];
pkts[i + 1] = log[pkts[i + 1]];
pkts[i + 2] = log[pkts[i + 2]];
pkts[i + 3] = log[pkts[i + 3]];
pkts[i + 4] = log[pkts[i + 4]];
pkts[i + 5] = log[pkts[i + 5]];
pkts[i + 6] = log[pkts[i + 6]];
pkts[i + 7] = log[pkts[i + 7]];
}
}
clock_gettime(CLOCK_REALTIME, &end);
double time = (double) (end.tv_nsec - start.tv_nsec) / 1000000000 +
(end.tv_sec - start.tv_sec);
return time;
}
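/**< cpu_run above processes packets in a manually 8-way unrolled loop, so num_pkts must be a
 * multiple of 8; main() guarantees this because GPU_MAX_PKTS % 8 == 0 is asserted and the
 * tested batch sizes start at 16 and double each time. */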
#if INCLUDE_COPY_TIME == 1
/**< Include copy overhead in measurement */
double gpu_run(int *h_pkts, int *d_pkts, int *d_log, int num_pkts)
{
struct timespec start, end;
int err = hipSuccess;
clock_gettime(CLOCK_REALTIME, &start);
/**< Copy packets to device */
err = hipMemcpyAsync(d_pkts, h_pkts, num_pkts * sizeof(int),
hipMemcpyHostToDevice, myStream);
CPE(err != hipSuccess, "Failed to copy to device memory\n", -1);
/**< Kernel launch */
int threadsPerBlock = 256;
int blocksPerGrid = (num_pkts + threadsPerBlock - 1) / threadsPerBlock;
hipLaunchKernelGGL(( randMem), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, myStream, d_pkts,
d_log, num_pkts);
err = hipGetLastError();
CPE(err != hipSuccess, "Failed to launch randMem kernel. This can happen "
"if the GPU does not have compute 3.5\n", -1);
/**< Copy back the results */
err = hipMemcpyAsync(h_pkts, d_pkts, num_pkts * sizeof(int),
hipMemcpyDeviceToHost, myStream);
CPE(err != hipSuccess, "Failed to copy C from device to host\n", -1);
/**< Wait for all stream ops to complete */
hipStreamSynchronize(myStream);
clock_gettime(CLOCK_REALTIME, &end);
double time = (double) (end.tv_nsec - start.tv_nsec) / 1000000000 +
(end.tv_sec - start.tv_sec);
return time;
}
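/**< This timed path relies on hipMemcpyAsync + hipStreamSynchronize; the asynchronous copies
 * only behave as intended because main() allocates h_pkts_gpu with hipHostMalloc, i.e. pinned
 * host memory that the DMA engine can access directly. */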
#else
/**< Don't include copy overhead in measurement */
double gpu_run(int *h_pkts, int *d_pkts, int *d_log, int num_pkts)
{
struct timespec start, end;
int err = hipSuccess;
/**< Copy packets to device */
err = hipMemcpy(d_pkts, h_pkts, num_pkts * sizeof(int),
hipMemcpyHostToDevice);
CPE(err != hipSuccess, "Failed to copy to device memory\n", -1);
/**< Memcpy has completed: start timer */
clock_gettime(CLOCK_REALTIME, &start);
/**< Kernel launch */
int threadsPerBlock = 256;
int blocksPerGrid = (num_pkts + threadsPerBlock - 1) / threadsPerBlock;
hipLaunchKernelGGL(( randMem), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_pkts, d_log, num_pkts);
err = hipGetLastError();
CPE(err != hipSuccess, "Failed to launch randMem kernel\n", -1);
hipDeviceSynchronize();
/**< Kernel execution finished: stop timer */
clock_gettime(CLOCK_REALTIME, &end);
/**< Copy back the results */
err = hipMemcpy(h_pkts, d_pkts, num_pkts * sizeof(int),
hipMemcpyDeviceToHost);
CPE(err != hipSuccess, "Failed to copy C from device to host\n", -1);
double time = (double) (end.tv_nsec - start.tv_nsec) / 1000000000 +
(end.tv_sec - start.tv_sec);
return time;
}
#endif
int main(int argc, char *argv[])
{
int err = hipSuccess;
int i, j;
int *h_log, *d_log;
int *h_pkts_cpu;
/**< Separate packet buffer to compare GPU's result with the CPU's */
int *h_pkts_gpu, *d_pkts_gpu;
srand(time(NULL));
printDeviceProperties();
/**< Initialize a cudaStream for async calls */
err = hipStreamCreate(&myStream);
CPE(err != hipSuccess, "Failed to create cudaStream\n", -1);
/**< Initialize hugepage log and copy it to the device: do it once */
red_printf("Allocating host log of size %lu bytes\n", LOG_CAP * sizeof(int));
int sid = shmget(1, LOG_CAP * sizeof(int), SHM_HUGETLB | 0666 | IPC_CREAT);
assert(sid >= 0);
h_log = (int *) shmat(sid, 0, 0);
assert(h_log != NULL);
init_ht_log(h_log, LOG_CAP);
red_printf("Allocating device log\n");
err = hipMalloc((void **) &d_log, LOG_CAP * sizeof(int));
CPE(err != hipSuccess, "Failed to allocate log on device\n", -1);
red_printf("Copying log to device\n");
err = hipMemcpy(d_log, h_log, LOG_CAP * sizeof(int), hipMemcpyHostToDevice);
CPE(err != hipSuccess, "Failed to copy to device memory\n", -1);
/**< Initialize the packet arrays for CPU and GPU code */
h_pkts_cpu = (int *) malloc(GPU_MAX_PKTS * sizeof(int));
/**< The host packet-array for GPU code should be pinned */
err = hipHostMalloc((void **) &h_pkts_gpu, GPU_MAX_PKTS * sizeof(int));
err = hipMalloc((void **) &d_pkts_gpu, GPU_MAX_PKTS * sizeof(int));
/**< Test for different batch sizes */
assert(GPU_MAX_PKTS % 8 == 0);
for(int num_pkts = 16; num_pkts < GPU_MAX_PKTS; num_pkts *= 2) {
double cpu_time = 0, gpu_time = 0;
/**< Perform several measurements for averaging */
for(i = 0; i < GPU_ITERS; i ++) {
/**< Generate a different set of packets for each iteration */
for(j = 0; j < num_pkts; j ++) {
h_pkts_cpu[j] = rand() & LOG_CAP_;
h_pkts_gpu[j] = h_pkts_cpu[j];
}
cpu_time += cpu_run(h_pkts_cpu, h_log, num_pkts);
gpu_time += gpu_run(h_pkts_gpu, d_pkts_gpu, d_log, num_pkts);
}
cpu_time = cpu_time / GPU_ITERS;
gpu_time = gpu_time / GPU_ITERS;
/**< Verify that the result vector is correct */
for(int i = 0; i < num_pkts; i ++) {
if (h_pkts_cpu[i] != h_pkts_gpu[i]) {
fprintf(stderr, "Result verification failed at element %d!\n", i);
fprintf(stderr, "CPU %d, GPU %d\n", h_pkts_cpu[i], h_pkts_gpu[i]);
exit(-1);
}
}
printf("PASS for num_pkts = %d\n", num_pkts);
/**< Print the packet-processing rate in millions of packets per second */
printf("num_pkts %d CPU %.2f GPU %.2f\n", num_pkts,
num_pkts / (cpu_time * 1000000),
num_pkts / (gpu_time * 1000000));
printf("\n");
}
/**< Free device memory */
hipFree(d_pkts_gpu);
hipFree(d_log);
/**< Free host memory */
free(h_pkts_cpu);
hipHostFree(h_pkts_gpu);
shmdt(h_log); /**< h_log was attached with shmat(), so detach it instead of calling free() */
/**< Reset the device and exit */
err = hipDeviceReset();
CPE(err != hipSuccess, "Failed to de-initialize the device\n", -1);
printf("Done\n");
return 0;
}
| 0c3844f40423e91fd1c268b13033696f0f2e2942.cu | #include <stdio.h>
#include <assert.h>
#include <sys/ipc.h>
#include <sys/shm.h>
/**< NVCC assumes that all header files are C++ files. Tell it that these are
* C header files. */
extern "C" {
#include "common.h"
}
cudaStream_t myStream;
void printDeviceProperties()
{
struct cudaDeviceProp deviceProp;
int ret = cudaGetDeviceProperties(&deviceProp, 0);
CPE(ret != cudaSuccess, "Get Device Properties failed\n", -1);
printf("\n=================DEVICE PROPERTIES=================\n");
printf("\tDevice name: %s\n", deviceProp.name);
printf("\tTotal global memory: %lu bytes\n", deviceProp.totalGlobalMem);
printf("\tWarp size: %d\n", deviceProp.warpSize);
printf("\tCompute capability: %d.%d\n", deviceProp.major, deviceProp.minor);
printf("\tMulti-processor count: %d\n", deviceProp.multiProcessorCount);
printf("\tThreads per multi-processor: %d\n", deviceProp.maxThreadsPerMultiProcessor);
printf("\n");
}
__global__ void
randMem(int *pkts, const int *log, int num_pkts)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < num_pkts) {
int j;
for(j = 0; j < DEPTH; j ++) {
pkts[i] = log[pkts[i]];
}
}
}
double cpu_run(int *pkts, int *log, int num_pkts)
{
int i;
struct timespec start, end;
clock_gettime(CLOCK_REALTIME, &start);
for(i = 0; i < num_pkts; i += 8) {
int j;
for(j = 0; j < DEPTH; j ++) {
pkts[i] = log[pkts[i]];
pkts[i + 1] = log[pkts[i + 1]];
pkts[i + 2] = log[pkts[i + 2]];
pkts[i + 3] = log[pkts[i + 3]];
pkts[i + 4] = log[pkts[i + 4]];
pkts[i + 5] = log[pkts[i + 5]];
pkts[i + 6] = log[pkts[i + 6]];
pkts[i + 7] = log[pkts[i + 7]];
}
}
clock_gettime(CLOCK_REALTIME, &end);
double time = (double) (end.tv_nsec - start.tv_nsec) / 1000000000 +
(end.tv_sec - start.tv_sec);
return time;
}
#if INCLUDE_COPY_TIME == 1
/**< Include copy overhead in measurement */
double gpu_run(int *h_pkts, int *d_pkts, int *d_log, int num_pkts)
{
struct timespec start, end;
int err = cudaSuccess;
clock_gettime(CLOCK_REALTIME, &start);
/**< Copy packets to device */
err = cudaMemcpyAsync(d_pkts, h_pkts, num_pkts * sizeof(int),
cudaMemcpyHostToDevice, myStream);
CPE(err != cudaSuccess, "Failed to copy to device memory\n", -1);
/**< Kernel launch */
int threadsPerBlock = 256;
int blocksPerGrid = (num_pkts + threadsPerBlock - 1) / threadsPerBlock;
randMem<<<blocksPerGrid, threadsPerBlock, 0, myStream>>>(d_pkts,
d_log, num_pkts);
err = cudaGetLastError();
CPE(err != cudaSuccess, "Failed to launch randMem kernel. This can happen "
"if the GPU does not have compute 3.5\n", -1);
/**< Copy back the results */
err = cudaMemcpyAsync(h_pkts, d_pkts, num_pkts * sizeof(int),
cudaMemcpyDeviceToHost, myStream);
CPE(err != cudaSuccess, "Failed to copy C from device to host\n", -1);
/**< Wait for all stream ops to complete */
cudaStreamSynchronize(myStream);
clock_gettime(CLOCK_REALTIME, &end);
double time = (double) (end.tv_nsec - start.tv_nsec) / 1000000000 +
(end.tv_sec - start.tv_sec);
return time;
}
#else
/**< Don't include copy overhead in measurement */
double gpu_run(int *h_pkts, int *d_pkts, int *d_log, int num_pkts)
{
struct timespec start, end;
int err = cudaSuccess;
/**< Copy packets to device */
err = cudaMemcpy(d_pkts, h_pkts, num_pkts * sizeof(int),
cudaMemcpyHostToDevice);
CPE(err != cudaSuccess, "Failed to copy to device memory\n", -1);
/**< Memcpy has completed: start timer */
clock_gettime(CLOCK_REALTIME, &start);
/**< Kernel launch */
int threadsPerBlock = 256;
int blocksPerGrid = (num_pkts + threadsPerBlock - 1) / threadsPerBlock;
randMem<<<blocksPerGrid, threadsPerBlock>>>(d_pkts, d_log, num_pkts);
err = cudaGetLastError();
CPE(err != cudaSuccess, "Failed to launch randMem kernel\n", -1);
cudaDeviceSynchronize();
/**< Kernel execution finished: stop timer */
clock_gettime(CLOCK_REALTIME, &end);
/**< Copy back the results */
err = cudaMemcpy(h_pkts, d_pkts, num_pkts * sizeof(int),
cudaMemcpyDeviceToHost);
CPE(err != cudaSuccess, "Failed to copy C from device to host\n", -1);
double time = (double) (end.tv_nsec - start.tv_nsec) / 1000000000 +
(end.tv_sec - start.tv_sec);
return time;
}
#endif
int main(int argc, char *argv[])
{
int err = cudaSuccess;
int i, j;
int *h_log, *d_log;
int *h_pkts_cpu;
/**< Separate packet buffer to compare GPU's result with the CPU's */
int *h_pkts_gpu, *d_pkts_gpu;
srand(time(NULL));
printDeviceProperties();
/**< Initialize a cudaStream for async calls */
err = cudaStreamCreate(&myStream);
CPE(err != cudaSuccess, "Failed to create cudaStream\n", -1);
/**< Initialize hugepage log and copy it to the device: do it once */
red_printf("Allocating host log of size %lu bytes\n", LOG_CAP * sizeof(int));
int sid = shmget(1, LOG_CAP * sizeof(int), SHM_HUGETLB | 0666 | IPC_CREAT);
assert(sid >= 0);
h_log = (int *) shmat(sid, 0, 0);
assert(h_log != NULL);
init_ht_log(h_log, LOG_CAP);
red_printf("Allocating device log\n");
err = cudaMalloc((void **) &d_log, LOG_CAP * sizeof(int));
CPE(err != cudaSuccess, "Failed to allocate log on device\n", -1);
red_printf("Copying log to device\n");
err = cudaMemcpy(d_log, h_log, LOG_CAP * sizeof(int), cudaMemcpyHostToDevice);
CPE(err != cudaSuccess, "Failed to copy to device memory\n", -1);
/**< Initialize the packet arrays for CPU and GPU code */
h_pkts_cpu = (int *) malloc(GPU_MAX_PKTS * sizeof(int));
/**< The host packet-array for GPU code should be pinned */
err = cudaMallocHost((void **) &h_pkts_gpu, GPU_MAX_PKTS * sizeof(int));
err = cudaMalloc((void **) &d_pkts_gpu, GPU_MAX_PKTS * sizeof(int));
/**< Test for different batch sizes */
assert(GPU_MAX_PKTS % 8 == 0);
for(int num_pkts = 16; num_pkts < GPU_MAX_PKTS; num_pkts *= 2) {
double cpu_time = 0, gpu_time = 0;
/**< Perform several measurements for averaging */
for(i = 0; i < GPU_ITERS; i ++) {
/**< Generate a different set of packets for each iteration */
for(j = 0; j < num_pkts; j ++) {
h_pkts_cpu[j] = rand() & LOG_CAP_;
h_pkts_gpu[j] = h_pkts_cpu[j];
}
cpu_time += cpu_run(h_pkts_cpu, h_log, num_pkts);
gpu_time += gpu_run(h_pkts_gpu, d_pkts_gpu, d_log, num_pkts);
}
cpu_time = cpu_time / GPU_ITERS;
gpu_time = gpu_time / GPU_ITERS;
/**< Verify that the result vector is correct */
for(int i = 0; i < num_pkts; i ++) {
if (h_pkts_cpu[i] != h_pkts_gpu[i]) {
fprintf(stderr, "Result verification failed at element %d!\n", i);
fprintf(stderr, "CPU %d, GPU %d\n", h_pkts_cpu[i], h_pkts_gpu[i]);
exit(-1);
}
}
printf("PASS for num_pkts = %d\n", num_pkts);
/**< Print the packet-processing rate in millions of packets per second */
printf("num_pkts %d CPU %.2f GPU %.2f\n", num_pkts,
num_pkts / (cpu_time * 1000000),
num_pkts / (gpu_time * 1000000));
printf("\n");
}
/**< Free device memory */
cudaFree(d_pkts_gpu);
cudaFree(d_log);
/**< Free host memory */
free(h_pkts_cpu);
cudaFreeHost(h_pkts_gpu);
shmdt(h_log); /**< h_log was attached with shmat(), so detach it instead of calling free() */
/**< Reset the device and exit */
err = cudaDeviceReset();
CPE(err != cudaSuccess, "Failed to de-initialize the device\n", -1);
printf("Done\n");
return 0;
}
|
34b5d9cdad9cc155fdc9184a7dae6f6757e486d9.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include "projektcuda.h"
#include "measurehelp.h"
int main()
{
printf( "Build configuration: sizeof(t_ve) = %u \n", sizeof(t_ve));
printf("\n detecting environment... \n");
int deviceCount;
hipGetDeviceCount(&deviceCount);
if (deviceCount == 0)
printf("There is no device supporting CUDA\n");
int dev;
for (dev = 0; dev < deviceCount; ++dev) {
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, dev);
printf("\nDevice %d: \"%s\"\n \n", dev, deviceProp.name);
printf(" Number of multiprocessors: %d\n", deviceProp.multiProcessorCount);
printf(" CUDA Capability Major revision number: %d\n", deviceProp.major);
printf(" CUDA Capability Minor revision number: %d\n", deviceProp.minor);
printf(" Maximum number of threads per block: %d\n", deviceProp.maxThreadsPerBlock);
}
}
| 34b5d9cdad9cc155fdc9184a7dae6f6757e486d9.cu | #include <stdlib.h>
#include <stdio.h>
#include "projektcuda.h"
#include "measurehelp.h"
int main()
{
printf( "Build configuration: sizeof(t_ve) = %u \n", sizeof(t_ve));
printf("\n detecting environment... \n");
int deviceCount;
cudaGetDeviceCount(&deviceCount);
if (deviceCount == 0)
printf("There is no device supporting CUDA\n");
int dev;
for (dev = 0; dev < deviceCount; ++dev) {
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, dev);
printf("\nDevice %d: \"%s\"\n \n", dev, deviceProp.name);
printf(" Number of multiprocessors: %d\n", deviceProp.multiProcessorCount);
printf(" CUDA Capability Major revision number: %d\n", deviceProp.major);
printf(" CUDA Capability Minor revision number: %d\n", deviceProp.minor);
printf(" Maximum number of threads per block: %d\n", deviceProp.maxThreadsPerBlock);
}
}
|
fc94808872e7ae7abb90d21386a06e88cd90be10.hip | // !!! This is a file automatically generated by hipify!!!
#include "../THCTensorMathCompareT.cuh"
#include "THHTensor.hpp"
#include "THHStream.h"
#include "../generic/THCTensorMathCompareT.cu"
#include "../THCGenerateShortType.h"
| fc94808872e7ae7abb90d21386a06e88cd90be10.cu | #include "../THCTensorMathCompareT.cuh"
#include "THCTensor.hpp"
#include "THCStream.h"
#include "../generic/THCTensorMathCompareT.cu"
#include "../THCGenerateShortType.h"
|
b4b2db67886026c2707c273c7689de69299c0f47.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Helper.h"
#include "ArffImporter.h"
#include <rocblas.h>
#define MAX_ITER 1000
#define LEARNING_RATE 10.0f
Node initNode( unsigned int numFeatures )
{
Node node;
node.numFeatures = numFeatures;
node.weights = (float*) malloc( numFeatures * sizeof( float ) );
memset( node.weights, 0, numFeatures * sizeof( float ) );
return node;
}
__global__ void ComputeCost(
float* __restrict__ dCostArr,
const unsigned short* __restrict__ dClassArr,
const unsigned int numInstances )
{
unsigned int instanceId = blockIdx.x * blockDim.x + threadIdx.x;
if (instanceId >= numInstances) return;
float cost = dCostArr[instanceId];
cost = 1.0f / (1.0f + expf(-cost)) - (float) dClassArr[instanceId];
dCostArr[instanceId] = cost;
}
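// On entry dCostArr holds the linear predictor w.x produced by the preceding gemv call; the
// kernel replaces it with sigmoid(w.x) - y, the per-instance residual whose feature-weighted
// average is the logistic-regression gradient used below.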
inline void cudaErrorCheck( hipError_t cudaStatus )
{
if (cudaStatus != hipSuccess)
printf(
"kernel launch failed with error \"%s\".\n",
hipGetErrorString( cudaStatus )
);
}
inline void cublasErrorCheck( hipblasStatus_t cublasStatus )
{
if (cublasStatus != HIPBLAS_STATUS_SUCCESS)
{
printf( "CuBLAS launch failed with error\n" );
switch (cublasStatus)
{
// each case breaks so only the matching status is reported
case HIPBLAS_STATUS_NOT_INITIALIZED:
printf( "HIPBLAS_STATUS_NOT_INITIALIZED\n" );
break;
case HIPBLAS_STATUS_ALLOC_FAILED:
printf( "HIPBLAS_STATUS_ALLOC_FAILED\n" );
break;
case HIPBLAS_STATUS_INVALID_VALUE:
printf( "HIPBLAS_STATUS_INVALID_VALUE\n" );
break;
case HIPBLAS_STATUS_ARCH_MISMATCH:
printf( "HIPBLAS_STATUS_ARCH_MISMATCH\n" );
break;
case HIPBLAS_STATUS_MAPPING_ERROR:
printf( "HIPBLAS_STATUS_MAPPING_ERROR\n" );
break;
case HIPBLAS_STATUS_EXECUTION_FAILED:
printf( "HIPBLAS_STATUS_EXECUTION_FAILED\n" );
break;
case HIPBLAS_STATUS_INTERNAL_ERROR:
printf( "HIPBLAS_STATUS_INTERNAL_ERROR\n" );
break;
}
}
}
int main()
{
ArffImporter trainSetImporter;
trainSetImporter.Read( "Dataset/train/train-first1000.arff" );
// ArffImporter testSetImporter;
// testSetImporter.Read( "Dataset/test/dev-first1000.arff" );
// Init host data
float* featureMatTrans = trainSetImporter.GetFeatureMatTrans();
unsigned short* classArr = trainSetImporter.GetClassIndex();
unsigned int numInstances = trainSetImporter.GetNumInstances();
unsigned int numFeatures = trainSetImporter.GetNumFeatures();
Node node = initNode( numFeatures );
// Init device data
float* dCostArr = nullptr;
float* dWeightArr = nullptr;
float* dFeatureMatTrans = nullptr;
float* dFeaCostProdArr = nullptr;
unsigned short* dClassArr = nullptr;
// One instance per row, one feature per column, as cublas prefers column-major matrix (faster)
cudaErrorCheck( hipMalloc( (void**) &dFeatureMatTrans, numInstances * numFeatures * sizeof( float ) ) );
cudaErrorCheck( hipMalloc( (void**) &dWeightArr, numFeatures * sizeof( float ) ) );
cudaErrorCheck( hipMalloc( (void**) &dCostArr, numInstances * sizeof( float ) ) );
cudaErrorCheck( hipMalloc( (void**) &dClassArr, numInstances * sizeof( unsigned short ) ) );
cudaErrorCheck( hipMalloc( (void**) &dFeaCostProdArr, numFeatures * sizeof( float ) ) );
cudaErrorCheck( hipMemcpyAsync(
dFeatureMatTrans,
featureMatTrans,
numInstances * numFeatures * sizeof( float ),
hipMemcpyHostToDevice ) );
cudaErrorCheck( hipMemcpyAsync(
dWeightArr,
node.weights,
numFeatures * sizeof( float ),
hipMemcpyHostToDevice ) );
cudaErrorCheck( hipMemcpyAsync(
dClassArr,
classArr,
numInstances * sizeof( unsigned short ),
hipMemcpyHostToDevice ) );
/* Determine block and grid size of ComputeCost kernel */
dim3 ccBlockDim;
dim3 ccGridDim;
if (numInstances > 128)
{
ccBlockDim.x = 128;
ccGridDim.x = (numInstances + 127) / 128;
}
else ccBlockDim.x = numInstances;
// Init CuBLAS
hipblasHandle_t cublasHandle;
cublasErrorCheck( hipblasCreate( &cublasHandle ) );
// Gradient descent params
float updateWParam = -LEARNING_RATE / (float) numInstances;
unsigned int iter = 0;
time_t start, end;
float dif;
time( &start );
printf( "\nStart gradient descent...\n" );
float default_alpha = 1.0f;
float default_beta = 0.0f;
// Gradient descent
while (iter++ < MAX_ITER)
{
// Classify
cublasErrorCheck( hipblasSgemv(
cublasHandle,
HIPBLAS_OP_N,
numInstances,
numFeatures,
&default_alpha,
dFeatureMatTrans,
numInstances,
dWeightArr,
1,
&default_beta,
dCostArr,
1 ) );
hipLaunchKernelGGL(( ComputeCost), dim3(ccGridDim), dim3(ccBlockDim) , 0, 0,
dCostArr,
dClassArr,
numInstances );
cudaErrorCheck( hipGetLastError() );
// Cost vec dot FeaMat-Transpose
cublasErrorCheck( hipblasSgemv(
cublasHandle,
HIPBLAS_OP_T,
numInstances,
numFeatures,
&default_alpha,
dFeatureMatTrans,
numInstances,
dCostArr,
1,
&default_beta,
dFeaCostProdArr,
1 ) );
// Update weights
cublasErrorCheck( hipblasSaxpy(
cublasHandle,
numFeatures,
&updateWParam,
dFeaCostProdArr,
1,
dWeightArr,
1 ) );
}
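// Each iteration above implements one batch gradient-descent step with three BLAS calls plus a kernel:
// z = X * w (gemv, no-trans), r = sigmoid(z) - y (ComputeCost), g = X^T * r (gemv, trans), and
// w <- w + updateWParam * g (axpy), where updateWParam = -LEARNING_RATE / numInstances.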
cudaErrorCheck( hipDeviceSynchronize() );
cublasErrorCheck( hipblasDestroy( cublasHandle ) );
cudaErrorCheck( hipMemcpy(
node.weights,
dWeightArr,
numFeatures * sizeof( float ),
hipMemcpyDeviceToHost ) );
time( &end );
dif = difftime( end, start );
printf( "Time taken is %.2lf seconds.\n", dif );
printf( "Updating weights completed, weight: %f\n", node.weights[0] );
hipFree( dFeatureMatTrans );
hipFree( dClassArr );
hipFree( dWeightArr );
hipFree( dCostArr );
hipFree( dFeaCostProdArr );
free( node.weights );
return 0;
}
| b4b2db67886026c2707c273c7689de69299c0f47.cu | #include "Helper.h"
#include "ArffImporter.h"
#include <cublas_v2.h>
#define MAX_ITER 1000
#define LEARNING_RATE 10.0f
Node initNode( unsigned int numFeatures )
{
Node node;
node.numFeatures = numFeatures;
node.weights = (float*) malloc( numFeatures * sizeof( float ) );
memset( node.weights, 0, numFeatures * sizeof( float ) );
return node;
}
__global__ void ComputeCost(
float* __restrict__ dCostArr,
const unsigned short* __restrict__ dClassArr,
const unsigned int numInstances )
{
unsigned int instanceId = blockIdx.x * blockDim.x + threadIdx.x;
if (instanceId >= numInstances) return;
float cost = dCostArr[instanceId];
cost = 1.0f / (1.0f + expf(-cost)) - (float) dClassArr[instanceId];
dCostArr[instanceId] = cost;
}
inline void cudaErrorCheck( cudaError_t cudaStatus )
{
if (cudaStatus != cudaSuccess)
printf(
"kernel launch failed with error \"%s\".\n",
cudaGetErrorString( cudaStatus )
);
}
inline void cublasErrorCheck( cublasStatus_t cublasStatus )
{
if (cublasStatus != CUBLAS_STATUS_SUCCESS)
{
printf( "CuBLAS launch failed with error\n" );
switch (cublasStatus)
{
// each case breaks so only the matching status is reported
case CUBLAS_STATUS_NOT_INITIALIZED:
printf( "CUBLAS_STATUS_NOT_INITIALIZED\n" );
break;
case CUBLAS_STATUS_ALLOC_FAILED:
printf( "CUBLAS_STATUS_ALLOC_FAILED\n" );
break;
case CUBLAS_STATUS_INVALID_VALUE:
printf( "CUBLAS_STATUS_INVALID_VALUE\n" );
break;
case CUBLAS_STATUS_ARCH_MISMATCH:
printf( "CUBLAS_STATUS_ARCH_MISMATCH\n" );
break;
case CUBLAS_STATUS_MAPPING_ERROR:
printf( "CUBLAS_STATUS_MAPPING_ERROR\n" );
break;
case CUBLAS_STATUS_EXECUTION_FAILED:
printf( "CUBLAS_STATUS_EXECUTION_FAILED\n" );
break;
case CUBLAS_STATUS_INTERNAL_ERROR:
printf( "CUBLAS_STATUS_INTERNAL_ERROR\n" );
break;
}
}
}
int main()
{
ArffImporter trainSetImporter;
trainSetImporter.Read( "Dataset/train/train-first1000.arff" );
// ArffImporter testSetImporter;
// testSetImporter.Read( "Dataset/test/dev-first1000.arff" );
// Init host data
float* featureMatTrans = trainSetImporter.GetFeatureMatTrans();
unsigned short* classArr = trainSetImporter.GetClassIndex();
unsigned int numInstances = trainSetImporter.GetNumInstances();
unsigned int numFeatures = trainSetImporter.GetNumFeatures();
Node node = initNode( numFeatures );
// Init device data
float* dCostArr = nullptr;
float* dWeightArr = nullptr;
float* dFeatureMatTrans = nullptr;
float* dFeaCostProdArr = nullptr;
unsigned short* dClassArr = nullptr;
// One instance per row, one feature per column, as cublas prefers column-major matrix (faster)
cudaErrorCheck( cudaMalloc( (void**) &dFeatureMatTrans, numInstances * numFeatures * sizeof( float ) ) );
cudaErrorCheck( cudaMalloc( (void**) &dWeightArr, numFeatures * sizeof( float ) ) );
cudaErrorCheck( cudaMalloc( (void**) &dCostArr, numInstances * sizeof( float ) ) );
cudaErrorCheck( cudaMalloc( (void**) &dClassArr, numInstances * sizeof( unsigned short ) ) );
cudaErrorCheck( cudaMalloc( (void**) &dFeaCostProdArr, numFeatures * sizeof( float ) ) );
cudaErrorCheck( cudaMemcpyAsync(
dFeatureMatTrans,
featureMatTrans,
numInstances * numFeatures * sizeof( float ),
cudaMemcpyHostToDevice ) );
cudaErrorCheck( cudaMemcpyAsync(
dWeightArr,
node.weights,
numFeatures * sizeof( float ),
cudaMemcpyHostToDevice ) );
cudaErrorCheck( cudaMemcpyAsync(
dClassArr,
classArr,
numInstances * sizeof( unsigned short ),
cudaMemcpyHostToDevice ) );
/* Determine block and grid size of ComputeCost kernel */
dim3 ccBlockDim;
dim3 ccGridDim;
if (numInstances > 128)
{
ccBlockDim.x = 128;
ccGridDim.x = (numInstances + 127) / 128;
}
else ccBlockDim.x = numInstances;
// Init CuBLAS
cublasHandle_t cublasHandle;
cublasErrorCheck( cublasCreate( &cublasHandle ) );
// Gradient descent params
float updateWParam = -LEARNING_RATE / (float) numInstances;
unsigned int iter = 0;
time_t start, end;
float dif;
time( &start );
printf( "\nStart gradient descent...\n" );
float default_alpha = 1.0f;
float default_beta = 0.0f;
// Gradient descent
while (iter++ < MAX_ITER)
{
// Classify
cublasErrorCheck( cublasSgemv(
cublasHandle,
CUBLAS_OP_N,
numInstances,
numFeatures,
&default_alpha,
dFeatureMatTrans,
numInstances,
dWeightArr,
1,
&default_beta,
dCostArr,
1 ) );
ComputeCost<<< ccGridDim, ccBlockDim >>>(
dCostArr,
dClassArr,
numInstances );
cudaErrorCheck( cudaGetLastError() );
// Cost vec dot FeaMat-Transpose
cublasErrorCheck( cublasSgemv(
cublasHandle,
CUBLAS_OP_T,
numInstances,
numFeatures,
&default_alpha,
dFeatureMatTrans,
numInstances,
dCostArr,
1,
&default_beta,
dFeaCostProdArr,
1 ) );
// Update weights
cublasErrorCheck( cublasSaxpy(
cublasHandle,
numFeatures,
&updateWParam,
dFeaCostProdArr,
1,
dWeightArr,
1 ) );
}
cudaErrorCheck( cudaThreadSynchronize() );
cublasErrorCheck( cublasDestroy( cublasHandle ) );
cudaErrorCheck( cudaMemcpy(
node.weights,
dWeightArr,
numFeatures * sizeof( float ),
cudaMemcpyDeviceToHost ) );
time( &end );
dif = difftime( end, start );
printf( "Time taken is %.2lf seconds.\n", dif );
printf( "Updating weights completed, weight: %f\n", node.weights[0] );
cudaFree( dFeatureMatTrans );
cudaFree( dClassArr );
cudaFree( dWeightArr );
cudaFree( dCostArr );
cudaFree( dFeaCostProdArr );
free( node.weights );
return 0;
}
|
939646ea16fc3bc7914efdffeb401fb51c2bd74d.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <rocblas.h>
extern "C" {
#include "../cwc.h"
#include "../cwc_internal.h"
}
#include "../../inl/ccv_convnet_inl.h"
template <int input_per_thread, int filter_per_thread, int filter_per_block>
__global__ static void _cwc_kern_convolutional_forward_propagate(const int strides, const int border, const int batch,
float* input, const int rows, const int cols, const int channels_per_partition, const int partition,
float* out, const int out_rows, const int out_cols,
float* filter, const int filter_rows, const int filter_cols, const int count,
float* const biases)
{
assert(gridDim.x * partition * filter_per_block == out_cols * count);
assert(gridDim.y == out_rows);
assert(gridDim.z == partition);
extern __shared__ float shared[];
float* shared_block = &shared[0];
float* shared_weights = &shared[batch];
float* shared_bias = &shared[batch + filter_per_block];
float prod[filter_per_thread][input_per_thread];
assert(batch == input_per_thread * blockDim.x);
assert(filter_per_block == filter_per_thread * blockDim.y);
const int thidx = threadIdx.x + threadIdx.y * blockDim.x;
const int thcnt = blockDim.x * blockDim.y;
int c, i, j, x, y;
#pragma unroll
for (i = 0; i < filter_per_thread; i++)
#pragma unroll
for (j = 0; j < input_per_thread; j++)
prod[i][j] = 0;
const int origin_x = blockIdx.x % out_cols;
const int origin_y = blockIdx.y;
const int filter_group_idx = blockIdx.z * count / (filter_per_block * partition) + blockIdx.x / out_cols; // for the partitioned filter group
input += (blockIdx.z * channels_per_partition * rows * cols + origin_y * strides * cols + origin_x * strides) * batch;
assert(thcnt >= batch);
assert(thcnt >= filter_per_block);
if (thidx < filter_per_block)
shared_bias[thidx] = biases[filter_group_idx * filter_per_block + thidx];
const int start_x = max(origin_x * strides - border, 0) - (origin_x * strides - border);
const int end_x = min(origin_x * strides - border + filter_cols, cols) - (origin_x * strides - border);
const int start_y = max(origin_y * strides - border, 0) - (origin_y * strides - border);
const int end_y = min(origin_y * strides - border + filter_rows, rows) - (origin_y * strides - border);
filter += filter_group_idx * filter_per_block;
for (c = 0; c < channels_per_partition; c++)
{
for (y = start_y; y < end_y; y++)
for (x = start_x; x < end_x; x++)
{
if (thidx < batch)
shared_block[thidx] = input[((y - border) * cols + x - border) * batch + thidx];
if (thidx < filter_per_block)
shared_weights[thidx] = filter[(y * filter_cols + x) * count + thidx];
__syncthreads();
#pragma unroll
for (i = 0; i < filter_per_thread; i++)
#pragma unroll
for (j = 0; j < input_per_thread; j++)
prod[i][j] += shared_block[j + threadIdx.x * input_per_thread] * shared_weights[i + threadIdx.y * filter_per_thread];
__syncthreads();
}
input += rows * cols * batch;
filter += filter_rows * filter_cols * count;
}
const int outcnt = out_rows * out_cols * batch;
out += (filter_group_idx * filter_per_block + threadIdx.y * filter_per_thread) * outcnt + (origin_y * out_cols + origin_x) * batch + threadIdx.x * input_per_thread;
#pragma unroll
for (i = 0; i < filter_per_thread; i++)
{
const float bias = shared_bias[i + threadIdx.y * filter_per_thread];
#pragma unroll
for (j = 0; j < input_per_thread; j++)
out[j] = max(0.0, prod[i][j] + bias);
out += outcnt;
}
}
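// Summary of the tiling above: each thread block produces one output position (origin_x, origin_y)
// for one group of filter_per_block filters within one partition; its threads tile the
// batch x filter dimensions (input_per_thread x filter_per_thread accumulators per thread),
// shared memory stages one batch-slice of input and one set of filter weights per (c, y, x) tap,
// and the ReLU max(0, .) is fused into the final store.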
static int _cwc_convnet_convolutional_forward_propagate_vary(ccv_convnet_layer_t* layer, int rows, int cols, int batch, float* a, float* b, const hipStream_t& stream,
int x, int y, int z) // these are the dynamic configurations
{
int out_rows, out_cols, out_partition;
ccv_convnet_make_output(layer, rows, cols, &out_rows, &out_cols, &out_partition);
// first do configuration validation
if (!(batch % x == 0 && z % y == 0 && layer->net.convolutional.count % (z * out_partition) == 0 &&
batch / x * z / y <= 1024 && /* thread number constraint */
batch / x * z / y >= batch && batch / x * z / y >= z && /* kernel internal loading constraint */
sizeof(float) * (batch + z * 2) <= 48 * 1024 /* shared memory size constraint */))
return -1;
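// Worked example (illustrative numbers): with batch = 128, x = 4, y = 2, z = 32 the launch below
// uses (128 / 4) * (32 / 2) = 512 threads per block and sizeof(float) * (128 + 64) = 768 bytes of
// shared memory, satisfying all three constraints (provided count is a multiple of 32 * partition).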
assert(b);
#define vary_block(_x, _y, _z) do { \
dim3 threads_per_block(batch / _x, _z / _y); \
assert(threads_per_block.x * threads_per_block.y <= 1024); \
dim3 num_blocks(out_cols * layer->net.convolutional.count / (_z * out_partition), out_rows, out_partition); \
int shared_memory_size = sizeof(float) * (batch + _z * 2); \
hipFuncSetCacheConfig(_cwc_kern_convolutional_forward_propagate<_x, _y, _z>, hipFuncCachePreferShared); \
hipLaunchKernelGGL(( _cwc_kern_convolutional_forward_propagate \
<_x, _y, _z>) \
, dim3(num_blocks), dim3(threads_per_block), shared_memory_size, stream, \
layer->net.convolutional.strides, layer->net.convolutional.border, batch, \
a, rows, cols, layer->input.matrix.channels / out_partition, out_partition, \
b, out_rows, out_cols, \
layer->w, layer->net.convolutional.rows, layer->net.convolutional.cols, layer->net.convolutional.count, \
layer->bias); \
} while (0)
cwc_vary_4_a(x, 1, 2, 4, 8, cwc_vary_5_b, y, 1, 2, 4, 6, 8, cwc_vary_6_c, z, 16, 24, 32, 36, 64, 72, vary_block);
#undef vary_block
hipError_t error = hipGetLastError();
if (hipErrorInvalidConfiguration == error)
return -1;
assert(error == hipSuccess);
return 0;
}
void cwc_convnet_convolutional_forward_propagate(ccv_convnet_layer_t* layer, int rows, int cols, int batch, float* a, float* b, const hipStream_t& stream)
{
static int vary_x[] = { 1, 2, 4, 8 };
static int vary_y[] = { 1, 2, 4, 6, 8 };
static int vary_z[] = { 16, 24, 32, 36, 64, 72 };
CWC_IMPLEMENT_VARY_STUB(EXTRA(layer)->vary.convolutional.forward, vary_x, vary_y, vary_z, _cwc_convnet_convolutional_forward_propagate_vary, layer, rows, cols, batch, a, b, stream);
}
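// CWC_IMPLEMENT_VARY_STUB (defined elsewhere in cwc, not shown here) presumably benchmarks each
// admissible (x, y, z) combination from the vary_* arrays on first use and caches the fastest one
// in EXTRA(layer)->vary.convolutional.forward; a return value of -1 from the *_vary function marks
// a configuration as unsupported so it is skipped.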
template <int channel_per_thread, int filter_per_thread, int channel_per_block, int batch_per_block>
__global__ static void _cwc_kern_convolutional_backward_propagate_coefficient_default(const int strides, const int border, const int batch, const int batch_group_count,
float* input, const int rows, const int cols, const int channels_per_partition, const int partition,
float* out_grad, const int out_rows, const int out_cols,
float* coeff, const int filter_rows, const int filter_cols, const int count_per_partition)
{
assert(gridDim.x == filter_cols);
assert(gridDim.y == filter_rows);
assert(gridDim.z * channel_per_block * batch_per_block == channels_per_partition * partition * batch);
assert(batch == batch_per_block * batch_group_count);
extern __shared__ float shared[];
float* shared_input = &shared[0];
float* shared_out_grad = &shared[channel_per_block];
const int thidx = threadIdx.x + threadIdx.y * blockDim.x;
const int thcnt = blockDim.x * blockDim.y;
assert(blockDim.x * filter_per_thread == count_per_partition);
assert(blockDim.y * channel_per_thread == channel_per_block);
assert(thcnt >= channel_per_block);
assert(thcnt >= count_per_partition);
const int origin_x = blockIdx.x;
const int origin_y = blockIdx.y;
const int channel_group_count = channels_per_partition / channel_per_block;
const int partition_idx = blockIdx.z / (channel_group_count * batch_group_count);
const int batch_group_idx = (blockIdx.z % (channel_group_count * batch_group_count)) / channel_group_count;
const int channel_group_idx = blockIdx.z % channel_group_count;
const int start_x = max(origin_x - border, 0) - (origin_x - border);
const int end_x = min(out_cols, (cols + border - origin_x + strides - 1) / strides);
const int start_y = max(origin_y - border, 0) - (origin_y - border);
const int end_y = min(out_rows, (rows + border - origin_y + strides - 1) / strides);
input += (partition_idx * batch + batch_group_idx * batch_per_block) * rows * cols * channels_per_partition + (origin_y * cols + origin_x) * channels_per_partition + channel_group_idx * channel_per_block;
out_grad += (partition_idx * batch + batch_group_idx * batch_per_block) * out_rows * out_cols * count_per_partition;
int i, j, c, x, y;
float prod[channel_per_thread][filter_per_thread];
#pragma unroll
for (i = 0; i < channel_per_thread; i++)
#pragma unroll
for (j = 0; j < filter_per_thread; j++)
prod[i][j] = 0;
for (c = 0; c < batch_per_block; c++)
{
for (y = start_y; y < end_y; y++)
for (x = start_x; x < end_x; x++)
{
if (thidx < count_per_partition)
shared_out_grad[thidx] = out_grad[(y * out_cols + x) * count_per_partition + thidx];
if (thidx < channel_per_block)
shared_input[thidx] = input[((y * strides - border) * cols + x * strides - border) * channels_per_partition + thidx];
__syncthreads();
#pragma unroll
for (i = 0; i < channel_per_thread; i++)
#pragma unroll
for (j = 0; j < filter_per_thread; j++)
prod[i][j] += shared_input[i + threadIdx.y * channel_per_thread] * shared_out_grad[j + threadIdx.x * filter_per_thread];
__syncthreads();
}
input += rows * cols * channels_per_partition;
out_grad += out_rows * out_cols * count_per_partition;
}
const int cocnt = filter_cols * filter_rows * count_per_partition * partition;
coeff += cocnt * (channels_per_partition * batch_group_idx + channel_group_idx * channel_per_block) + (origin_y * filter_cols + origin_x) * count_per_partition * partition + partition_idx * count_per_partition;
#pragma unroll
for (i = 0; i < channel_per_thread; i++)
#pragma unroll
for (j = 0; j < filter_per_thread; j++)
coeff[(i + threadIdx.y * channel_per_thread) * cocnt + j + threadIdx.x * filter_per_thread] = prod[i][j];
}
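// Note: each (channel group, batch group) block above writes its own slab of coeff (offset by
// batch_group_idx), so coeff holds batch_group_count partial weight gradients that still have to
// be summed into the final coefficient update afterwards.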
template <int channel_per_thread, int filter_per_thread, int static_filter_rows, int batch_per_block>
__global__ static void _cwc_kern_convolutional_backward_propagate_coefficient_rows(const int strides, const int border, const int batch,
float* input, const int rows, const int cols, const int channels,
float* out_grad, const int out_rows, const int out_cols,
float* coeff, const int filter_rows, const int filter_cols, const int count)
{
assert(gridDim.x == filter_cols);
assert(gridDim.y == out_rows);
assert(static_filter_rows == filter_rows);
extern __shared__ float shared[];
float* shared_input = &shared[0];
float* shared_out_grad = &shared[filter_rows * channels * batch_per_block];
const int thidx = threadIdx.x + threadIdx.y * blockDim.x;
const int thcnt = blockDim.x * blockDim.y;
assert(blockDim.x * filter_per_thread == count);
assert(blockDim.y * channel_per_thread == channels);
assert(thcnt >= channels * batch_per_block);
assert(thcnt >= count);
const int origin_x = blockIdx.x;
const int batch_group_idx = blockIdx.z;
const int start_x = max(origin_x - border, 0) - (origin_x - border);
const int end_x = min(out_cols, (cols + border - origin_x + strides - 1) / strides);
input += (rows * cols * channels * batch_group_idx + origin_x * channels) * batch_per_block;
out_grad += out_rows * out_cols * count * batch_group_idx * batch_per_block;
int i, j, k, c, x;
const int y = blockIdx.y;
float prod[static_filter_rows][channel_per_thread][filter_per_thread];
#pragma unroll
for (i = 0; i < static_filter_rows; i++)
#pragma unroll
for (j = 0; j < channel_per_thread; j++)
#pragma unroll
for (k = 0; k < filter_per_thread; k++)
prod[i][j][k] = 0;
const int iy = y * strides - border;
input += y * strides * cols * channels * batch_per_block;
out_grad += y * out_cols * count * batch_per_block;
for (x = start_x; x < end_x; x++)
{
if (thidx < channels * batch_per_block)
#pragma unroll
for (i = 0; i < static_filter_rows; i++)
shared_input[i * channels * batch_per_block + thidx] = (i + iy >= 0 && i + iy < rows) ? input[((i - border) * cols + x * strides - border) * channels * batch_per_block + thidx] : 0;
if (thidx < count)
#pragma unroll
for (c = 0; c < batch_per_block; c++)
shared_out_grad[c * count + thidx] = out_grad[x * count * batch_per_block + c * count + thidx];
__syncthreads();
#pragma unroll
for (i = 0; i < static_filter_rows; i++)
#pragma unroll
for (j = 0; j < channel_per_thread; j++)
#pragma unroll
for (k = 0; k < filter_per_thread; k++)
{
float sum = 0;
#pragma unroll
for (c = 0; c < batch_per_block; c++)
sum += shared_input[i * channels * batch_per_block + c * channels + j + threadIdx.y * channel_per_thread] * shared_out_grad[c * count + k + threadIdx.x * filter_per_thread];
prod[i][j][k] += sum;
}
__syncthreads();
}
const int cocnt = filter_cols * filter_rows * count;
coeff += cocnt * channels * (blockIdx.y + blockIdx.z * out_rows) + origin_x * count;
#pragma unroll
for (i = 0; i < channel_per_thread; i++)
#pragma unroll
for (j = 0; j < static_filter_rows; j++)
#pragma unroll
for (k = 0; k < filter_per_thread; k++)
coeff[(i + threadIdx.y * channel_per_thread) * cocnt + j * filter_cols * count + k + threadIdx.x * filter_per_thread] = prod[j][i][k];
}
template <int input_per_thread, int channel_per_thread, int channel_per_block, int strides>
__global__ static void _cwc_kern_convolutional_backward_propagate_error(const int border, const int batch,
float* input_grad, const int rows, const int cols, const int channels,
float* out_grad, const int out_rows, const int out_cols,
float* filter, const int filter_rows, const int filter_cols, const int count_per_partition, const int partition)
{
assert(gridDim.z == partition);
extern __shared__ float shared[];
float* shared_grad = &shared[0];
float* shared_weights = &shared[batch];
float prod[input_per_thread][channel_per_thread];
assert(batch == input_per_thread * blockDim.x);
assert(channel_per_block == channel_per_thread * blockDim.y);
const int thidx = threadIdx.x + threadIdx.y * blockDim.x;
const int thcnt = blockDim.x * blockDim.y;
assert(thcnt >= batch);
assert(thcnt >= channel_per_block);
const int origin_x = blockIdx.x % cols;
const int origin_y = blockIdx.y;
const int channel_group_idx = blockIdx.z * channels / (channel_per_block * partition) + blockIdx.x / cols;
int i, j, k, c, x, y;
#pragma unroll
for (i = 0; i < input_per_thread; i++)
#pragma unroll
for (j = 0; j < channel_per_thread; j++)
prod[i][j] = 0;
const int ycnt = (filter_rows - 1 - (origin_x + border) % strides) / strides + 1;
const int xcnt = (filter_cols - 1 - (origin_y + border) % strides) / strides + 1;
const int filter_y = (ycnt - 1) * strides + (origin_x + border) % strides;
assert(filter_y < filter_rows);
const int filter_x = (xcnt - 1) * strides + (origin_y + border) % strides;
assert(filter_x < filter_cols);
const int out_y = (origin_x + border) / strides - ycnt + 1;
const int out_x = (origin_y + border) / strides - xcnt + 1;
const int out_start_y = max(out_y, 0);
const int out_start_x = max(out_x, 0);
const int filter_start_y = filter_y - (out_start_y - out_y) * strides;
const int filter_start_x = filter_x - (out_start_x - out_x) * strides;
out_grad += (blockIdx.z * count_per_partition * out_rows * out_cols + out_start_y * out_cols + out_start_x) * batch;
const int out_end_y = out_y + ycnt - 1;
const int out_end_x = out_x + xcnt - 1;
const int filter_end_y = (origin_x + border) % strides + (out_end_y - min(out_end_y, out_rows - 1)) * strides;
const int filter_end_x = (origin_y + border) % strides + (out_end_x - min(out_end_x, out_cols - 1)) * strides;
const int outcnt = out_rows * out_cols * batch;
filter += channel_group_idx * channel_per_block;
for (k = 0; k < count_per_partition; k++)
{
float* out_grad_per_filter = out_grad + k * outcnt;
for (y = filter_start_y; y >= filter_end_y; y -= strides)
{
for (x = filter_start_x, c = 0; x >= filter_end_x; x -= strides, c++)
{
if (thidx < batch)
shared_grad[thidx] = out_grad_per_filter[c * batch + thidx];
if (thidx < channel_per_block)
shared_weights[thidx] = filter[(y * filter_cols + x) * channels + thidx];
__syncthreads();
#pragma unroll
for (i = 0; i < input_per_thread; i++)
#pragma unroll
for (j = 0; j < channel_per_thread; j++)
prod[i][j] += shared_grad[i + threadIdx.x * input_per_thread] * shared_weights[j + threadIdx.y * channel_per_thread];
__syncthreads();
}
out_grad_per_filter += out_cols * batch;
}
filter += filter_rows * filter_cols * channels;
}
const int incnt = rows * cols * batch;
input_grad += channel_group_idx * channel_per_block * incnt + (origin_x * cols + origin_y) * batch;
#pragma unroll
for (i = 0; i < channel_per_thread; i++)
#pragma unroll
for (j = 0; j < input_per_thread; j++)
input_grad[(i + threadIdx.y * channel_per_thread) * incnt + j + threadIdx.x * input_per_thread] = prod[j][i];
}
// this method reorders (transposes) a matrix from channel-major ([channel][spatial][batch]) to batch-major, channel-interleaved ([batch][spatial][channel]) layout
template <int reorder_per_block>
__global__ static void _cwc_kern_reorder_matrix_major(float* a, float* b, const int count, const int channels_per_partition, const int partition, const int batch)
{
assert(blockDim.x == reorder_per_block);
const int batch_group_idx = blockIdx.y % (batch / reorder_per_block);
const int channel_group_idx = blockIdx.y / (batch / reorder_per_block);
a += (blockIdx.z * count * channels_per_partition + blockIdx.x + channel_group_idx * reorder_per_block * count) * batch + batch_group_idx * reorder_per_block;
b += (blockIdx.z * count * batch + batch_group_idx * reorder_per_block * count + blockIdx.x) * channels_per_partition + channel_group_idx * reorder_per_block;
__shared__ float prod[reorder_per_block][reorder_per_block];
int i;
#pragma unroll
for (i = 0; i < reorder_per_block; i++)
prod[i][threadIdx.x] = a[i * count * batch + threadIdx.x];
__syncthreads();
#pragma unroll
for (i = 0; i < reorder_per_block; i++)
b[i * count * channels_per_partition + threadIdx.x] = prod[threadIdx.x][i];
}
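// The kernel above moves a reorder_per_block x reorder_per_block tile through shared memory and
// writes it back transposed, which converts the [channel][spatial][batch] ordering of 'a' into
// the [batch][spatial][channel] ordering of 'b' with coalesced loads and stores.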
// this method reorders a matrix from channel-major to batch-major, channel-interleaved layout (partitioned variant)
__global__ static void _cwc_kern_reorder_matrix_major_parted(float* a, float* b, const int count, const int channels, const int batch, const int channels_per_partition, const int batch_per_partition, const int partition)
{
b[(threadIdx.x * count + blockIdx.x) * channels + blockIdx.y + threadIdx.y * channels_per_partition] = a[(blockIdx.y * count + blockIdx.x) * batch + threadIdx.x + threadIdx.y * batch_per_partition];
}
// this method reorders a matrix into a channel-interleaved layout grouped by blocks of batch_per_block images (row-oriented variant)
template <int batch_per_block>
__global__ static void _cwc_kern_reorder_matrix_major_per_block_rows(float* a, float* b, const int count, const int channels, const int batch)
{
const int thidx = blockIdx.y * batch_per_block + threadIdx.y;
b[(blockIdx.y * count + blockIdx.x) * channels * batch_per_block + threadIdx.y * channels + threadIdx.x] = a[(threadIdx.x * count + blockIdx.x) * batch + thidx];
}
// this method reorders a matrix into the same per-batch-block, channel-interleaved layout using shared-memory tiles
template <int channel_per_block, int batch_per_block, int batch_group_per_block>
__global__ static void _cwc_kern_reorder_matrix_major_per_block(float* a, float* b, const int count, const int channels, const int batch)
{
const int batch_group_idx = blockIdx.y % (batch / (batch_per_block * batch_group_per_block));
const int channel_group_idx = blockIdx.y / (batch / (batch_per_block * batch_group_per_block));
a += (channel_group_idx * channel_per_block * count + blockIdx.x) * batch + batch_group_idx * batch_per_block * batch_group_per_block;
b += (batch_group_idx * batch_group_per_block * count + blockIdx.x) * channels * batch_per_block + channel_group_idx * channel_per_block;
__shared__ float prod[channel_per_block][batch_per_block * batch_group_per_block];
int i, j;
#pragma unroll
for (i = 0; i < channel_per_block; i++)
prod[i][threadIdx.x] = a[i * count * batch + threadIdx.x];
__syncthreads();
if (threadIdx.x < channel_per_block)
#pragma unroll
for (i = 0; i < batch_group_per_block; i++)
#pragma unroll
for (j = 0; j < batch_per_block; j++)
b[(i * count * batch_per_block + j) * channels + threadIdx.x] = prod[threadIdx.x][i * batch_per_block + j];
}
static void _cwc_convnet_reorder_matrix_major_per_block(float* a, float* b, const int count, const int channels, const int batch, const hipStream_t& stream)
{
// this choice is based on experience; ideally it could be profile-guided as well
const int batch_group_count = batch / BATCH_PER_BLOCK;
if (channels < 8)
{
assert(batch % BATCH_PER_BLOCK == 0);
assert(channels * BATCH_PER_BLOCK <= 1024);
hipLaunchKernelGGL(( _cwc_kern_reorder_matrix_major_per_block_rows
<BATCH_PER_BLOCK>)
, dim3(dim3(count, batch_group_count)), dim3(dim3(channels, BATCH_PER_BLOCK)), 0, stream,
a, b, count, channels, batch);
} else {
assert(channels % THREAD_PER_BLOCK == 0);
assert(THREAD_PER_BLOCK % BATCH_PER_BLOCK == 0);
assert(batch % THREAD_PER_BLOCK == 0);
hipLaunchKernelGGL(( _cwc_kern_reorder_matrix_major_per_block
<THREAD_PER_BLOCK, BATCH_PER_BLOCK, THREAD_PER_BLOCK / BATCH_PER_BLOCK>)
, dim3(dim3(count, (channels / THREAD_PER_BLOCK) * (batch / THREAD_PER_BLOCK))), dim3(THREAD_PER_BLOCK), sizeof(float) * THREAD_PER_BLOCK * THREAD_PER_BLOCK, stream,
a, b, count, channels, batch);
}
}
static int _cwc_convnet_convolutional_backward_propagate_coefficient_rows_vary(ccv_convnet_layer_t* layer, int batch, float* a, float* n, float* m, float* b, ccv_convnet_layer_t* configuration, float* scratch, float* unit, const hipStream_t& stream, const hipblasHandle_t& handle,
int x, int y, int z)
{
if (!(layer->net.convolutional.count % y == 0 && layer->input.matrix.channels % x == 0 &&
layer->net.convolutional.count / y * layer->input.matrix.channels / x <= 1024 && /* thread per block constraint */
layer->net.convolutional.count / y * layer->input.matrix.channels / x >= layer->input.matrix.channels * BATCH_PER_BLOCK &&
layer->net.convolutional.count / y * layer->input.matrix.channels / x >= layer->net.convolutional.count && /* shared loading constraint */
sizeof(float) * BATCH_PER_BLOCK * (layer->net.convolutional.rows * layer->input.matrix.channels + layer->net.convolutional.count) <= 48 * 1024 /* shared memory size constraint */))
return -1;
int out_rows, out_cols, out_partition;
ccv_convnet_make_output(layer, layer->input.matrix.rows, layer->input.matrix.cols, &out_rows, &out_cols, &out_partition);
assert(out_partition == 1); // this cannot handle partition
float* chm = scratch;
float* cha = scratch + layer->input.matrix.rows * layer->input.matrix.cols * layer->input.matrix.channels * batch;
float* cbw = scratch + layer->input.matrix.rows * layer->input.matrix.cols * layer->input.matrix.channels * batch + out_rows * out_cols * layer->net.convolutional.count * batch;
const int batch_group_count = batch / BATCH_PER_BLOCK;
_cwc_convnet_reorder_matrix_major_per_block
(m, chm, layer->input.matrix.rows * layer->input.matrix.cols, layer->input.matrix.channels, batch, stream);
_cwc_convnet_reorder_matrix_major_per_block
(a, cha, out_rows * out_cols, layer->net.convolutional.count, batch, stream);
#define vary_block(_x, _y, _z) do { \
dim3 threads_per_block_for_coeff(layer->net.convolutional.count / _y, layer->input.matrix.channels / _x); \
assert(threads_per_block_for_coeff.x * threads_per_block_for_coeff.y <= 1024); \
dim3 num_blocks_for_coeff(layer->net.convolutional.cols, out_rows, batch_group_count); \
int shared_memory_size = sizeof(float) * BATCH_PER_BLOCK * (layer->net.convolutional.rows * layer->input.matrix.channels + layer->net.convolutional.count); \
hipFuncSetCacheConfig(_cwc_kern_convolutional_backward_propagate_coefficient_rows<_x, _y, _z, BATCH_PER_BLOCK>, hipFuncCachePreferShared); \
hipLaunchKernelGGL(( _cwc_kern_convolutional_backward_propagate_coefficient_rows \
<_x, _y, _z, BATCH_PER_BLOCK>) \
, dim3(num_blocks_for_coeff), dim3(threads_per_block_for_coeff), shared_memory_size, stream, \
layer->net.convolutional.strides, layer->net.convolutional.border, batch, \
chm, layer->input.matrix.rows, layer->input.matrix.cols, layer->input.matrix.channels, \
cha, out_rows, out_cols, \
cbw, layer->net.convolutional.rows, layer->net.convolutional.cols, layer->net.convolutional.count); \
} while (0)
// special casing for image
cwc_vary_4_a(x, 1, 2, 3, 4, cwc_vary_4_b, y, 1, 2, 3, 4, cwc_vary_5_c, layer->net.convolutional.rows, 3, 5, 7, 9, 11, vary_block);
#undef vary_block
hipError_t error = hipGetLastError();
if (hipErrorInvalidConfiguration == error)
return -1;
assert(error == hipSuccess);
return 0;
}
static void _cwc_convnet_convolutional_backward_propagate_coefficient_rows(ccv_convnet_layer_t* layer, int batch, float* a, float* n, float* m, float* b, ccv_convnet_layer_t* configuration, float* scratch, float* unit, const hipStream_t& stream, const hipblasHandle_t& handle)
{
static int vary_x[] = { 1, 2, 3, 4 };
static int vary_y[] = { 1, 2, 3, 4 };
static int vary_z[] = { 1 };
// benchmarking requires that this step has no side effects
CWC_IMPLEMENT_VARY_STUB(EXTRA(layer)->vary.convolutional.backward.coefficient, vary_x, vary_y, vary_z, _cwc_convnet_convolutional_backward_propagate_coefficient_rows_vary, layer, batch, a, n, m, b, configuration, scratch, unit, stream, handle);
int out_rows, out_cols, out_partition;
ccv_convnet_make_output(layer, layer->input.matrix.rows, layer->input.matrix.cols, &out_rows, &out_cols, &out_partition);
float* cbw = scratch + layer->input.matrix.rows * layer->input.matrix.cols * layer->input.matrix.channels * batch + out_rows * out_cols * layer->net.convolutional.count * batch;
int count = layer->net.convolutional.rows * layer->net.convolutional.cols * layer->net.convolutional.count * layer->input.matrix.channels;
const int batch_group_count = batch / BATCH_PER_BLOCK;
// this one has a side effect since it accumulates into configuration->w
hipblasSgemv(handle, HIPBLAS_OP_N, count, out_rows * batch_group_count, &one, cbw, count, unit, 1, &one, configuration->w, 1);
}
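// Note on the Sgemv above (inferred, assuming `unit` is a vector of ones as its name and
// use suggest): cbw holds one partial coefficient matrix of length `count` for every
// (output row, batch group) pair, so the accumulating matrix-vector product effectively
// computes w += cbw * ones(out_rows * batch_group_count), folding all partial sums into
// configuration->w in a single call.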
static int _cwc_convnet_convolutional_backward_propagate_coefficient_default_vary(ccv_convnet_layer_t* layer, int batch, float* a, float* n, float* m, float* b, ccv_convnet_layer_t* configuration, float* scratch, float* unit, const hipStream_t& stream, const hipblasHandle_t& handle,
int x, int y, int z)
{
int out_rows, out_cols, out_partition;
ccv_convnet_make_output(layer, layer->input.matrix.rows, layer->input.matrix.cols, &out_rows, &out_cols, &out_partition);
if (!(layer->net.convolutional.count % (y * out_partition) == 0 && z % x == 0 && layer->net.convolutional.channels % (z * out_partition) == 0 &&
layer->net.convolutional.count / (y * out_partition) * z / x <= 1024 && /* thread per block constraint */
layer->net.convolutional.count / (y * out_partition) * z / x >= z && layer->net.convolutional.count / (y * out_partition) * z / x >= layer->net.convolutional.count / out_partition && /* shared loading constraint */
sizeof(float) * (z + layer->net.convolutional.count / out_partition) <= 32 * 1024 /* shared memory size constraint */))
return -1;
float* chm = scratch;
float* cha = scratch + layer->input.matrix.rows * layer->input.matrix.cols * layer->input.matrix.channels * batch;
float* cbw = scratch + layer->input.matrix.rows * layer->input.matrix.cols * layer->input.matrix.channels * batch + out_rows * out_cols * layer->net.convolutional.count * batch;
const int batch_group_count = batch / BATCH_PER_BLOCK;
assert((layer->input.matrix.channels / out_partition) % THREAD_PER_BLOCK == 0);
assert((layer->net.convolutional.count / out_partition) % THREAD_PER_BLOCK == 0);
assert(batch % THREAD_PER_BLOCK == 0);
hipLaunchKernelGGL(( _cwc_kern_reorder_matrix_major
<THREAD_PER_BLOCK>)
, dim3(dim3(layer->input.matrix.rows * layer->input.matrix.cols, (layer->input.matrix.channels / out_partition / THREAD_PER_BLOCK) * (batch / THREAD_PER_BLOCK), out_partition)), dim3(THREAD_PER_BLOCK), sizeof(float) * THREAD_PER_BLOCK * THREAD_PER_BLOCK, stream,
m, chm, layer->input.matrix.rows * layer->input.matrix.cols, layer->input.matrix.channels / out_partition, out_partition, batch);
hipLaunchKernelGGL(( _cwc_kern_reorder_matrix_major
<THREAD_PER_BLOCK>)
, dim3(dim3(out_rows * out_cols, (layer->net.convolutional.count / out_partition / THREAD_PER_BLOCK) * (batch / THREAD_PER_BLOCK), out_partition)), dim3(THREAD_PER_BLOCK), sizeof(float) * THREAD_PER_BLOCK * THREAD_PER_BLOCK, stream,
a, cha, out_rows * out_cols, layer->net.convolutional.count / out_partition, out_partition, batch);
#define vary_block(_x, _y, _z) do { \
dim3 threads_per_block_for_coeff(layer->net.convolutional.count / (_y * out_partition), _z / _x); \
assert(threads_per_block_for_coeff.x * threads_per_block_for_coeff.y <= 1024); \
dim3 num_blocks_for_coeff(layer->net.convolutional.cols, layer->net.convolutional.rows, layer->net.convolutional.channels / _z * batch_group_count); \
int shared_memory_size = sizeof(float) * (_z + layer->net.convolutional.count / out_partition); \
hipLaunchKernelGGL(( _cwc_kern_convolutional_backward_propagate_coefficient_default \
<_x, _y, _z, BATCH_PER_BLOCK>) \
, dim3(num_blocks_for_coeff), dim3(threads_per_block_for_coeff), shared_memory_size, stream, \
layer->net.convolutional.strides, layer->net.convolutional.border, batch, batch_group_count, \
chm, layer->input.matrix.rows, layer->input.matrix.cols, layer->input.matrix.channels / out_partition, out_partition, \
cha, out_rows, out_cols, \
cbw, layer->net.convolutional.rows, layer->net.convolutional.cols, layer->net.convolutional.count / out_partition); \
} while (0)
cwc_vary_6_a(x, 1, 2, 3, 4, 6, 8, cwc_vary_6_b, y, 1, 2, 3, 4, 6, 8, cwc_vary_4_c, z, 16, 24, 32, 36, vary_block);
#undef vary_block
hipError_t error = hipGetLastError();
if (hipErrorInvalidConfiguration == error)
return -1;
assert(error == hipSuccess);
return 0;
}
static void _cwc_convnet_convolutional_backward_propagate_coefficient_default(ccv_convnet_layer_t* layer, int batch, float* a, float* n, float* m, float* b, ccv_convnet_layer_t* configuration, float* scratch, float* unit, const hipStream_t& stream, const hipblasHandle_t& handle)
{
static int vary_x[] = { 1, 2, 3, 4, 6, 8 };
static int vary_y[] = { 1, 2, 3, 4, 6, 8 };
static int vary_z[] = { 16, 24, 32, 36 };
// benchmarking requires that this step has no side effects
CWC_IMPLEMENT_VARY_STUB(EXTRA(layer)->vary.convolutional.backward.coefficient, vary_x, vary_y, vary_z, _cwc_convnet_convolutional_backward_propagate_coefficient_default_vary, layer, batch, a, n, m, b, configuration, scratch, unit, stream, handle);
int out_rows, out_cols, out_partition;
ccv_convnet_make_output(layer, layer->input.matrix.rows, layer->input.matrix.cols, &out_rows, &out_cols, &out_partition);
float* cbw = scratch + layer->input.matrix.rows * layer->input.matrix.cols * layer->input.matrix.channels * batch + out_rows * out_cols * layer->net.convolutional.count * batch;
int count = layer->net.convolutional.rows * layer->net.convolutional.cols * layer->net.convolutional.count * layer->input.matrix.channels / out_partition;
const int batch_group_count = batch / BATCH_PER_BLOCK;
// this one has a side effect since it accumulates into configuration->w
hipblasSgemv(handle, HIPBLAS_OP_N, count, batch_group_count, &one, cbw, count, unit, 1, &one, configuration->w, 1);
}
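// Note (inferred): unlike the rows variant, the default path keeps only one partial
// coefficient matrix per batch group, so the accumulating Sgemv above reduces over
// batch_group_count columns rather than out_rows * batch_group_count.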
static int _cwc_convnet_convolutional_backward_propagate_error_vary(ccv_convnet_layer_t* layer, int batch, float* a, float* n, float* m, float* b, ccv_convnet_layer_t* configuration, float* scratch, float* unit, const hipStream_t& stream, const hipblasHandle_t& handle,
int x, int y, int z)
{
int out_rows, out_cols, out_partition;
ccv_convnet_make_output(layer, layer->input.matrix.rows, layer->input.matrix.cols, &out_rows, &out_cols, &out_partition);
if (!(batch % x == 0 && z % y == 0 &&
layer->input.matrix.channels % (z * out_partition) == 0 &&
batch / x * z / y <= 1024 && /* thread per block constraint */
batch / x * z / y >= batch && batch / x * z / y >= z && /* shared memory loading constraint */
sizeof(float) * (batch + z) <= 48 * 1024 /* shared memory size constraint */))
return -1;
float* chw = scratch;
hipLaunchKernelGGL(( _cwc_kern_reorder_matrix_major_parted)
, dim3(dim3(layer->net.convolutional.rows * layer->net.convolutional.cols, layer->input.matrix.channels / out_partition)), dim3(dim3(layer->net.convolutional.count / out_partition, out_partition)), 0, stream,
layer->w, chw, layer->net.convolutional.rows * layer->net.convolutional.cols, layer->input.matrix.channels, layer->net.convolutional.count, layer->input.matrix.channels / out_partition, layer->net.convolutional.count / out_partition, out_partition);
#define vary_block(_x, _y, _z, _s) do { \
dim3 threads_per_block(batch / _x, _z / _y); \
assert(threads_per_block.x * threads_per_block.y <= 1024); \
dim3 num_blocks(layer->input.matrix.cols * layer->input.matrix.channels / (_z * out_partition), layer->input.matrix.rows, out_partition); \
int shared_memory_size = sizeof(float) * (batch + _z); \
hipFuncSetCacheConfig(_cwc_kern_convolutional_backward_propagate_error<_x, _y, _z, _s>, hipFuncCachePreferShared); \
hipLaunchKernelGGL(( _cwc_kern_convolutional_backward_propagate_error \
<_x, _y, _z, _s>) \
, dim3(num_blocks), dim3(threads_per_block), shared_memory_size, stream, \
layer->net.convolutional.border, batch, \
b, layer->input.matrix.rows, layer->input.matrix.cols, layer->input.matrix.channels, \
a, out_rows, out_cols, \
chw, layer->net.convolutional.rows, layer->net.convolutional.cols, layer->net.convolutional.count / out_partition, out_partition); \
} while (0)
cwc_vary_4_a(x, 1, 2, 4, 8, cwc_vary_5_b, y, 1, 2, 4, 6, 8, cwc_vary_6_c, z, 16, 24, 32, 36, 64, 72, cwc_vary_4_d, layer->net.convolutional.strides, 1, 2, 3, 4, vary_block);
#undef vary_block
hipError_t error = hipGetLastError();
if (hipErrorInvalidConfiguration == error)
return -1;
assert(error == hipSuccess);
return 0;
}
static void _cwc_convnet_convolutional_backward_propagate_error(ccv_convnet_layer_t* layer, int batch, float* a, float* n, float* m, float* b, ccv_convnet_layer_t* configuration, float* scratch, float* unit, const hipStream_t& stream, const hipblasHandle_t& handle)
{
static int vary_x[] = { 1, 2, 4, 8 };
static int vary_y[] = { 1, 2, 4, 6, 8 };
static int vary_z[] = { 16, 24, 32, 36, 64, 72 };
CWC_IMPLEMENT_VARY_STUB(EXTRA(layer)->vary.convolutional.backward.gradient, vary_x, vary_y, vary_z, _cwc_convnet_convolutional_backward_propagate_error_vary, layer, batch, a, n, m, b, configuration, scratch, unit, stream, handle);
}
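// Note (inferred from how CWC_IMPLEMENT_VARY_STUB is used here): each *_vary function is an
// auto-tuning candidate — it returns -1 when the requested (x, y, z) tile sizes violate its
// thread-count or shared-memory constraints, and the stub is expected to benchmark the
// remaining configurations and cache the fastest one in EXTRA(layer)->vary.*.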
void cwc_convnet_convolutional_backward_propagate(ccv_convnet_layer_t* layer, int batch, float* a, float* n, float* m, float* b, ccv_convnet_layer_t* configuration, float* scratch, float* unit, const hipStream_t& stream, const hipblasHandle_t& handle)
{
assert(layer->net.convolutional.count % 4 == 0);
assert(batch % BATCH_PER_BLOCK == 0);
int out_rows, out_cols, out_partition;
ccv_convnet_make_output(layer, layer->input.matrix.rows, layer->input.matrix.cols, &out_rows, &out_cols, &out_partition);
// it turns out that applying relu first saves us a lot of computation because there is no need to load both out and out_grad any more
hipLaunchKernelGGL(( cwc_kern_relu_backward_propagate)
, dim3(dim3(out_cols, out_rows, layer->net.convolutional.count)), dim3(batch), 0, stream,
batch, n, a, out_rows, out_cols, layer->net.convolutional.count);
assert(hipGetLastError() == hipSuccess);
if (cwc_convnet_layer_use_rows(layer))
_cwc_convnet_convolutional_backward_propagate_coefficient_rows(layer, batch, a, n, m, b, configuration, scratch, unit, stream, handle);
else
_cwc_convnet_convolutional_backward_propagate_coefficient_default(layer, batch, a, n, m, b, configuration, scratch, unit, stream, handle);
// compute the bias directly using gemv routine
hipblasSgemv(handle, HIPBLAS_OP_T, out_rows * out_cols * batch, layer->net.convolutional.count, &one, a, out_rows * out_cols * batch, unit, 1, &one, configuration->bias, 1);
assert(hipGetLastError() == hipSuccess);
if (b)
_cwc_convnet_convolutional_backward_propagate_error(layer, batch, a, n, m, b, configuration, scratch, unit, stream, handle);
}
| 939646ea16fc3bc7914efdffeb401fb51c2bd74d.cu | #include <cuda.h>
#include <cublas_v2.h>
extern "C" {
#include "../cwc.h"
#include "../cwc_internal.h"
}
#include "../../inl/ccv_convnet_inl.h"
template <int input_per_thread, int filter_per_thread, int filter_per_block>
__global__ static void _cwc_kern_convolutional_forward_propagate(const int strides, const int border, const int batch,
float* input, const int rows, const int cols, const int channels_per_partition, const int partition,
float* out, const int out_rows, const int out_cols,
float* filter, const int filter_rows, const int filter_cols, const int count,
float* const biases)
{
assert(gridDim.x * partition * filter_per_block == out_cols * count);
assert(gridDim.y == out_rows);
assert(gridDim.z == partition);
extern __shared__ float shared[];
float* shared_block = &shared[0];
float* shared_weights = &shared[batch];
float* shared_bias = &shared[batch + filter_per_block];
float prod[filter_per_thread][input_per_thread];
assert(batch == input_per_thread * blockDim.x);
assert(filter_per_block == filter_per_thread * blockDim.y);
const int thidx = threadIdx.x + threadIdx.y * blockDim.x;
const int thcnt = blockDim.x * blockDim.y;
int c, i, j, x, y;
#pragma unroll
for (i = 0; i < filter_per_thread; i++)
#pragma unroll
for (j = 0; j < input_per_thread; j++)
prod[i][j] = 0;
const int origin_x = blockIdx.x % out_cols;
const int origin_y = blockIdx.y;
const int filter_group_idx = blockIdx.z * count / (filter_per_block * partition) + blockIdx.x / out_cols; // for the partitioned filter group
input += (blockIdx.z * channels_per_partition * rows * cols + origin_y * strides * cols + origin_x * strides) * batch;
assert(thcnt >= batch);
assert(thcnt >= filter_per_block);
if (thidx < filter_per_block)
shared_bias[thidx] = biases[filter_group_idx * filter_per_block + thidx];
const int start_x = max(origin_x * strides - border, 0) - (origin_x * strides - border);
const int end_x = min(origin_x * strides - border + filter_cols, cols) - (origin_x * strides - border);
const int start_y = max(origin_y * strides - border, 0) - (origin_y * strides - border);
const int end_y = min(origin_y * strides - border + filter_rows, rows) - (origin_y * strides - border);
filter += filter_group_idx * filter_per_block;
for (c = 0; c < channels_per_partition; c++)
{
for (y = start_y; y < end_y; y++)
for (x = start_x; x < end_x; x++)
{
if (thidx < batch)
shared_block[thidx] = input[((y - border) * cols + x - border) * batch + thidx];
if (thidx < filter_per_block)
shared_weights[thidx] = filter[(y * filter_cols + x) * count + thidx];
__syncthreads();
#pragma unroll
for (i = 0; i < filter_per_thread; i++)
#pragma unroll
for (j = 0; j < input_per_thread; j++)
prod[i][j] += shared_block[j + threadIdx.x * input_per_thread] * shared_weights[i + threadIdx.y * filter_per_thread];
__syncthreads();
}
input += rows * cols * batch;
filter += filter_rows * filter_cols * count;
}
const int outcnt = out_rows * out_cols * batch;
out += (filter_group_idx * filter_per_block + threadIdx.y * filter_per_thread) * outcnt + (origin_y * out_cols + origin_x) * batch + threadIdx.x * input_per_thread;
#pragma unroll
for (i = 0; i < filter_per_thread; i++)
{
const float bias = shared_bias[i + threadIdx.y * filter_per_thread];
#pragma unroll
for (j = 0; j < input_per_thread; j++)
out[j] = max(0.0, prod[i][j] + bias);
out += outcnt;
}
}
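// Note (inferred from the asserts above): each thread block of this kernel produces a
// filter_per_block x batch tile of one output pixel, with
//   blockDim = (batch / input_per_thread, filter_per_block / filter_per_thread)
//   gridDim  = (out_cols * count / (filter_per_block * partition), out_rows, partition)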
static int _cwc_convnet_convolutional_forward_propagate_vary(ccv_convnet_layer_t* layer, int rows, int cols, int batch, float* a, float* b, const cudaStream_t& stream,
int x, int y, int z) // these are the dynamic configurations
{
int out_rows, out_cols, out_partition;
ccv_convnet_make_output(layer, rows, cols, &out_rows, &out_cols, &out_partition);
// first do configuration validation
if (!(batch % x == 0 && z % y == 0 && layer->net.convolutional.count % (z * out_partition) == 0 &&
batch / x * z / y <= 1024 && /* thread number constraint */
batch / x * z / y >= batch && batch / x * z / y >= z && /* kernel internal loading constraint */
sizeof(float) * (batch + z * 2) <= 48 * 1024 /* shared memory size constraint */))
return -1;
assert(b);
#define vary_block(_x, _y, _z) do { \
dim3 threads_per_block(batch / _x, _z / _y); \
assert(threads_per_block.x * threads_per_block.y <= 1024); \
dim3 num_blocks(out_cols * layer->net.convolutional.count / (_z * out_partition), out_rows, out_partition); \
int shared_memory_size = sizeof(float) * (batch + _z * 2); \
cudaFuncSetCacheConfig(_cwc_kern_convolutional_forward_propagate<_x, _y, _z>, cudaFuncCachePreferShared); \
_cwc_kern_convolutional_forward_propagate \
<_x, _y, _z> \
<<<num_blocks, threads_per_block, shared_memory_size, stream>>> \
(layer->net.convolutional.strides, layer->net.convolutional.border, batch, \
a, rows, cols, layer->input.matrix.channels / out_partition, out_partition, \
b, out_rows, out_cols, \
layer->w, layer->net.convolutional.rows, layer->net.convolutional.cols, layer->net.convolutional.count, \
layer->bias); \
} while (0)
cwc_vary_4_a(x, 1, 2, 4, 8, cwc_vary_5_b, y, 1, 2, 4, 6, 8, cwc_vary_6_c, z, 16, 24, 32, 36, 64, 72, vary_block);
#undef vary_block
cudaError_t error = cudaGetLastError();
if (cudaErrorInvalidConfiguration == error)
return -1;
assert(error == cudaSuccess);
return 0;
}
void cwc_convnet_convolutional_forward_propagate(ccv_convnet_layer_t* layer, int rows, int cols, int batch, float* a, float* b, const cudaStream_t& stream)
{
static int vary_x[] = { 1, 2, 4, 8 };
static int vary_y[] = { 1, 2, 4, 6, 8 };
static int vary_z[] = { 16, 24, 32, 36, 64, 72 };
CWC_IMPLEMENT_VARY_STUB(EXTRA(layer)->vary.convolutional.forward, vary_x, vary_y, vary_z, _cwc_convnet_convolutional_forward_propagate_vary, layer, rows, cols, batch, a, b, stream);
}
template <int channel_per_thread, int filter_per_thread, int channel_per_block, int batch_per_block>
__global__ static void _cwc_kern_convolutional_backward_propagate_coefficient_default(const int strides, const int border, const int batch, const int batch_group_count,
float* input, const int rows, const int cols, const int channels_per_partition, const int partition,
float* out_grad, const int out_rows, const int out_cols,
float* coeff, const int filter_rows, const int filter_cols, const int count_per_partition)
{
assert(gridDim.x == filter_cols);
assert(gridDim.y == filter_rows);
assert(gridDim.z * channel_per_block * batch_per_block == channels_per_partition * partition * batch);
assert(batch == batch_per_block * batch_group_count);
extern __shared__ float shared[];
float* shared_input = &shared[0];
float* shared_out_grad = &shared[channel_per_block];
const int thidx = threadIdx.x + threadIdx.y * blockDim.x;
const int thcnt = blockDim.x * blockDim.y;
assert(blockDim.x * filter_per_thread == count_per_partition);
assert(blockDim.y * channel_per_thread == channel_per_block);
assert(thcnt >= channel_per_block);
assert(thcnt >= count_per_partition);
const int origin_x = blockIdx.x;
const int origin_y = blockIdx.y;
const int channel_group_count = channels_per_partition / channel_per_block;
const int partition_idx = blockIdx.z / (channel_group_count * batch_group_count);
const int batch_group_idx = (blockIdx.z % (channel_group_count * batch_group_count)) / channel_group_count;
const int channel_group_idx = blockIdx.z % channel_group_count;
const int start_x = max(origin_x - border, 0) - (origin_x - border);
const int end_x = min(out_cols, (cols + border - origin_x + strides - 1) / strides);
const int start_y = max(origin_y - border, 0) - (origin_y - border);
const int end_y = min(out_rows, (rows + border - origin_y + strides - 1) / strides);
input += (partition_idx * batch + batch_group_idx * batch_per_block) * rows * cols * channels_per_partition + (origin_y * cols + origin_x) * channels_per_partition + channel_group_idx * channel_per_block;
out_grad += (partition_idx * batch + batch_group_idx * batch_per_block) * out_rows * out_cols * count_per_partition;
int i, j, c, x, y;
float prod[channel_per_thread][filter_per_thread];
#pragma unroll
for (i = 0; i < channel_per_thread; i++)
#pragma unroll
for (j = 0; j < filter_per_thread; j++)
prod[i][j] = 0;
for (c = 0; c < batch_per_block; c++)
{
for (y = start_y; y < end_y; y++)
for (x = start_x; x < end_x; x++)
{
if (thidx < count_per_partition)
shared_out_grad[thidx] = out_grad[(y * out_cols + x) * count_per_partition + thidx];
if (thidx < channel_per_block)
shared_input[thidx] = input[((y * strides - border) * cols + x * strides - border) * channels_per_partition + thidx];
__syncthreads();
#pragma unroll
for (i = 0; i < channel_per_thread; i++)
#pragma unroll
for (j = 0; j < filter_per_thread; j++)
prod[i][j] += shared_input[i + threadIdx.y * channel_per_thread] * shared_out_grad[j + threadIdx.x * filter_per_thread];
__syncthreads();
}
input += rows * cols * channels_per_partition;
out_grad += out_rows * out_cols * count_per_partition;
}
const int cocnt = filter_cols * filter_rows * count_per_partition * partition;
coeff += cocnt * (channels_per_partition * batch_group_idx + channel_group_idx * channel_per_block) + (origin_y * filter_cols + origin_x) * count_per_partition * partition + partition_idx * count_per_partition;
#pragma unroll
for (i = 0; i < channel_per_thread; i++)
#pragma unroll
for (j = 0; j < filter_per_thread; j++)
coeff[(i + threadIdx.y * channel_per_thread) * cocnt + j + threadIdx.x * filter_per_thread] = prod[i][j];
}
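// Note (inferred): for one (filter_x, filter_y) tap and one (partition, batch group,
// channel group) triple, this kernel accumulates the outer product of the reordered input
// and output gradients over batch_per_block images, writing a separate partial coefficient
// matrix per batch group that is summed later with cublasSgemv.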
template <int channel_per_thread, int filter_per_thread, int static_filter_rows, int batch_per_block>
__global__ static void _cwc_kern_convolutional_backward_propagate_coefficient_rows(const int strides, const int border, const int batch,
float* input, const int rows, const int cols, const int channels,
float* out_grad, const int out_rows, const int out_cols,
float* coeff, const int filter_rows, const int filter_cols, const int count)
{
assert(gridDim.x == filter_cols);
assert(gridDim.y == out_rows);
assert(static_filter_rows == filter_rows);
extern __shared__ float shared[];
float* shared_input = &shared[0];
float* shared_out_grad = &shared[filter_rows * channels * batch_per_block];
const int thidx = threadIdx.x + threadIdx.y * blockDim.x;
const int thcnt = blockDim.x * blockDim.y;
assert(blockDim.x * filter_per_thread == count);
assert(blockDim.y * channel_per_thread == channels);
assert(thcnt >= channels * batch_per_block);
assert(thcnt >= count);
const int origin_x = blockIdx.x;
const int batch_group_idx = blockIdx.z;
const int start_x = max(origin_x - border, 0) - (origin_x - border);
const int end_x = min(out_cols, (cols + border - origin_x + strides - 1) / strides);
input += (rows * cols * channels * batch_group_idx + origin_x * channels) * batch_per_block;
out_grad += out_rows * out_cols * count * batch_group_idx * batch_per_block;
int i, j, k, c, x;
const int y = blockIdx.y;
float prod[static_filter_rows][channel_per_thread][filter_per_thread];
#pragma unroll
for (i = 0; i < static_filter_rows; i++)
#pragma unroll
for (j = 0; j < channel_per_thread; j++)
#pragma unroll
for (k = 0; k < filter_per_thread; k++)
prod[i][j][k] = 0;
const int iy = y * strides - border;
input += y * strides * cols * channels * batch_per_block;
out_grad += y * out_cols * count * batch_per_block;
for (x = start_x; x < end_x; x++)
{
if (thidx < channels * batch_per_block)
#pragma unroll
for (i = 0; i < static_filter_rows; i++)
shared_input[i * channels * batch_per_block + thidx] = (i + iy >= 0 && i + iy < rows) ? input[((i - border) * cols + x * strides - border) * channels * batch_per_block + thidx] : 0;
if (thidx < count)
#pragma unroll
for (c = 0; c < batch_per_block; c++)
shared_out_grad[c * count + thidx] = out_grad[x * count * batch_per_block + c * count + thidx];
__syncthreads();
#pragma unroll
for (i = 0; i < static_filter_rows; i++)
#pragma unroll
for (j = 0; j < channel_per_thread; j++)
#pragma unroll
for (k = 0; k < filter_per_thread; k++)
{
float sum = 0;
#pragma unroll
for (c = 0; c < batch_per_block; c++)
sum += shared_input[i * channels * batch_per_block + c * channels + j + threadIdx.y * channel_per_thread] * shared_out_grad[c * count + k + threadIdx.x * filter_per_thread];
prod[i][j][k] += sum;
}
__syncthreads();
}
const int cocnt = filter_cols * filter_rows * count;
coeff += cocnt * channels * (blockIdx.y + blockIdx.z * out_rows) + origin_x * count;
#pragma unroll
for (i = 0; i < channel_per_thread; i++)
#pragma unroll
for (j = 0; j < static_filter_rows; j++)
#pragma unroll
for (k = 0; k < filter_per_thread; k++)
coeff[(i + threadIdx.y * channel_per_thread) * cocnt + j * filter_cols * count + k + threadIdx.x * filter_per_thread] = prod[j][i][k];
}
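// Note (inferred): the "rows" variant additionally keeps one partial result per output row
// (gridDim.y == out_rows) and unrolls the whole filter column via static_filter_rows, which
// is why its host wrapper reduces over out_rows * batch_group_count partial matrices.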
template <int input_per_thread, int channel_per_thread, int channel_per_block, int strides>
__global__ static void _cwc_kern_convolutional_backward_propagate_error(const int border, const int batch,
float* input_grad, const int rows, const int cols, const int channels,
float* out_grad, const int out_rows, const int out_cols,
float* filter, const int filter_rows, const int filter_cols, const int count_per_partition, const int partition)
{
assert(gridDim.z == partition);
extern __shared__ float shared[];
float* shared_grad = &shared[0];
float* shared_weights = &shared[batch];
float prod[input_per_thread][channel_per_thread];
assert(batch == input_per_thread * blockDim.x);
assert(channel_per_block == channel_per_thread * blockDim.y);
const int thidx = threadIdx.x + threadIdx.y * blockDim.x;
const int thcnt = blockDim.x * blockDim.y;
assert(thcnt >= batch);
assert(thcnt >= channel_per_block);
const int origin_x = blockIdx.x % cols;
const int origin_y = blockIdx.y;
const int channel_group_idx = blockIdx.z * channels / (channel_per_block * partition) + blockIdx.x / cols;
int i, j, k, c, x, y;
#pragma unroll
for (i = 0; i < input_per_thread; i++)
#pragma unroll
for (j = 0; j < channel_per_thread; j++)
prod[i][j] = 0;
const int ycnt = (filter_rows - 1 - (origin_x + border) % strides) / strides + 1;
const int xcnt = (filter_cols - 1 - (origin_y + border) % strides) / strides + 1;
const int filter_y = (ycnt - 1) * strides + (origin_x + border) % strides;
assert(filter_y < filter_rows);
const int filter_x = (xcnt - 1) * strides + (origin_y + border) % strides;
assert(filter_x < filter_cols);
const int out_y = (origin_x + border) / strides - ycnt + 1;
const int out_x = (origin_y + border) / strides - xcnt + 1;
const int out_start_y = max(out_y, 0);
const int out_start_x = max(out_x, 0);
const int filter_start_y = filter_y - (out_start_y - out_y) * strides;
const int filter_start_x = filter_x - (out_start_x - out_x) * strides;
out_grad += (blockIdx.z * count_per_partition * out_rows * out_cols + out_start_y * out_cols + out_start_x) * batch;
const int out_end_y = out_y + ycnt - 1;
const int out_end_x = out_x + xcnt - 1;
const int filter_end_y = (origin_x + border) % strides + (out_end_y - min(out_end_y, out_rows - 1)) * strides;
const int filter_end_x = (origin_y + border) % strides + (out_end_x - min(out_end_x, out_cols - 1)) * strides;
const int outcnt = out_rows * out_cols * batch;
filter += channel_group_idx * channel_per_block;
for (k = 0; k < count_per_partition; k++)
{
float* out_grad_per_filter = out_grad + k * outcnt;
for (y = filter_start_y; y >= filter_end_y; y -= strides)
{
for (x = filter_start_x, c = 0; x >= filter_end_x; x -= strides, c++)
{
if (thidx < batch)
shared_grad[thidx] = out_grad_per_filter[c * batch + thidx];
if (thidx < channel_per_block)
shared_weights[thidx] = filter[(y * filter_cols + x) * channels + thidx];
__syncthreads();
#pragma unroll
for (i = 0; i < input_per_thread; i++)
#pragma unroll
for (j = 0; j < channel_per_thread; j++)
prod[i][j] += shared_grad[i + threadIdx.x * input_per_thread] * shared_weights[j + threadIdx.y * channel_per_thread];
__syncthreads();
}
out_grad_per_filter += out_cols * batch;
}
filter += filter_rows * filter_cols * channels;
}
const int incnt = rows * cols * batch;
input_grad += channel_group_idx * channel_per_block * incnt + (origin_x * cols + origin_y) * batch;
#pragma unroll
for (i = 0; i < channel_per_thread; i++)
#pragma unroll
for (j = 0; j < input_per_thread; j++)
input_grad[(i + threadIdx.y * channel_per_thread) * incnt + j + threadIdx.x * input_per_thread] = prod[j][i];
}
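// Note (inferred): this kernel computes the gradient w.r.t. the layer input — for every
// input pixel it walks the filter taps that could have touched it (the stride/border
// arithmetic above) and accumulates out_grad * filter over the count_per_partition filters
// of its partition.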
// this method reorders a matrix
template <int reorder_per_block>
__global__ static void _cwc_kern_reorder_matrix_major(float* a, float* b, const int count, const int channels_per_partition, const int partition, const int batch)
{
assert(blockDim.x == reorder_per_block);
const int batch_group_idx = blockIdx.y % (batch / reorder_per_block);
const int channel_group_idx = blockIdx.y / (batch / reorder_per_block);
a += (blockIdx.z * count * channels_per_partition + blockIdx.x + channel_group_idx * reorder_per_block * count) * batch + batch_group_idx * reorder_per_block;
b += (blockIdx.z * count * batch + batch_group_idx * reorder_per_block * count + blockIdx.x) * channels_per_partition + channel_group_idx * reorder_per_block;
__shared__ float prod[reorder_per_block][reorder_per_block];
int i;
#pragma unroll
for (i = 0; i < reorder_per_block; i++)
prod[i][threadIdx.x] = a[i * count * batch + threadIdx.x];
__syncthreads();
#pragma unroll
for (i = 0; i < reorder_per_block; i++)
b[i * count * channels_per_partition + threadIdx.x] = prod[threadIdx.x][i];
}
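// Note (inferred): this kernel transposes reorder_per_block x reorder_per_block tiles
// through shared memory, turning batch-major a[partition][channel][pixel][batch] into
// channel-major b[partition][batch][pixel][channel]. A sketch of the launch used later,
// assuming THREAD_PER_BLOCK were 16:
//   _cwc_kern_reorder_matrix_major<16>
//       <<<dim3(count, (channels_per_partition / 16) * (batch / 16), partition),
//          16, sizeof(float) * 16 * 16, stream>>>
//       (a, b, count, channels_per_partition, partition, batch);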
// this method reorders a matrix
__global__ static void _cwc_kern_reorder_matrix_major_parted(float* a, float* b, const int count, const int channels, const int batch, const int channels_per_partition, const int batch_per_partition, const int partition)
{
b[(threadIdx.x * count + blockIdx.x) * channels + blockIdx.y + threadIdx.y * channels_per_partition] = a[(blockIdx.y * count + blockIdx.x) * batch + threadIdx.x + threadIdx.y * batch_per_partition];
}
// this method reorders a matrix
template <int batch_per_block>
__global__ static void _cwc_kern_reorder_matrix_major_per_block_rows(float* a, float* b, const int count, const int channels, const int batch)
{
const int thidx = blockIdx.y * batch_per_block + threadIdx.y;
b[(blockIdx.y * count + blockIdx.x) * channels * batch_per_block + threadIdx.y * channels + threadIdx.x] = a[(threadIdx.x * count + blockIdx.x) * batch + thidx];
}
// this method reorders a matrix
template <int channel_per_block, int batch_per_block, int batch_group_per_block>
__global__ static void _cwc_kern_reorder_matrix_major_per_block(float* a, float* b, const int count, const int channels, const int batch)
{
const int batch_group_idx = blockIdx.y % (batch / (batch_per_block * batch_group_per_block));
const int channel_group_idx = blockIdx.y / (batch / (batch_per_block * batch_group_per_block));
a += (channel_group_idx * channel_per_block * count + blockIdx.x) * batch + batch_group_idx * batch_per_block * batch_group_per_block;
b += (batch_group_idx * batch_group_per_block * count + blockIdx.x) * channels * batch_per_block + channel_group_idx * channel_per_block;
__shared__ float prod[channel_per_block][batch_per_block * batch_group_per_block];
int i, j;
#pragma unroll
for (i = 0; i < channel_per_block; i++)
prod[i][threadIdx.x] = a[i * count * batch + threadIdx.x];
__syncthreads();
if (threadIdx.x < channel_per_block)
#pragma unroll
for (i = 0; i < batch_group_per_block; i++)
#pragma unroll
for (j = 0; j < batch_per_block; j++)
b[(i * count * batch_per_block + j) * channels + threadIdx.x] = prod[threadIdx.x][i * batch_per_block + j];
}
static void _cwc_convnet_reorder_matrix_major_per_block(float* a, float* b, const int count, const int channels, const int batch, const cudaStream_t& stream)
{
// this threshold is chosen by experience; ideally it could be profile-guided too
const int batch_group_count = batch / BATCH_PER_BLOCK;
if (channels < 8)
{
assert(batch % BATCH_PER_BLOCK == 0);
assert(channels * BATCH_PER_BLOCK <= 1024);
_cwc_kern_reorder_matrix_major_per_block_rows
<BATCH_PER_BLOCK>
<<<dim3(count, batch_group_count), dim3(channels, BATCH_PER_BLOCK), 0, stream>>>
(a, b, count, channels, batch);
} else {
assert(channels % THREAD_PER_BLOCK == 0);
assert(THREAD_PER_BLOCK % BATCH_PER_BLOCK == 0);
assert(batch % THREAD_PER_BLOCK == 0);
_cwc_kern_reorder_matrix_major_per_block
<THREAD_PER_BLOCK, BATCH_PER_BLOCK, THREAD_PER_BLOCK / BATCH_PER_BLOCK>
<<<dim3(count, (channels / THREAD_PER_BLOCK) * (batch / THREAD_PER_BLOCK)), THREAD_PER_BLOCK, sizeof(float) * THREAD_PER_BLOCK * THREAD_PER_BLOCK, stream>>>
(a, b, count, channels, batch);
}
}
static int _cwc_convnet_convolutional_backward_propagate_coefficient_rows_vary(ccv_convnet_layer_t* layer, int batch, float* a, float* n, float* m, float* b, ccv_convnet_layer_t* configuration, float* scratch, float* unit, const cudaStream_t& stream, const cublasHandle_t& handle,
int x, int y, int z)
{
if (!(layer->net.convolutional.count % y == 0 && layer->input.matrix.channels % x == 0 &&
layer->net.convolutional.count / y * layer->input.matrix.channels / x <= 1024 && /* thread per block constraint */
layer->net.convolutional.count / y * layer->input.matrix.channels / x >= layer->input.matrix.channels * BATCH_PER_BLOCK &&
layer->net.convolutional.count / y * layer->input.matrix.channels / x >= layer->net.convolutional.count && /* shared loading constraint */
sizeof(float) * BATCH_PER_BLOCK * (layer->net.convolutional.rows * layer->input.matrix.channels + layer->net.convolutional.count) <= 48 * 1024 /* shared memory size constraint */))
return -1;
int out_rows, out_cols, out_partition;
ccv_convnet_make_output(layer, layer->input.matrix.rows, layer->input.matrix.cols, &out_rows, &out_cols, &out_partition);
assert(out_partition == 1); // this cannot handle partition
float* chm = scratch;
float* cha = scratch + layer->input.matrix.rows * layer->input.matrix.cols * layer->input.matrix.channels * batch;
float* cbw = scratch + layer->input.matrix.rows * layer->input.matrix.cols * layer->input.matrix.channels * batch + out_rows * out_cols * layer->net.convolutional.count * batch;
const int batch_group_count = batch / BATCH_PER_BLOCK;
_cwc_convnet_reorder_matrix_major_per_block
(m, chm, layer->input.matrix.rows * layer->input.matrix.cols, layer->input.matrix.channels, batch, stream);
_cwc_convnet_reorder_matrix_major_per_block
(a, cha, out_rows * out_cols, layer->net.convolutional.count, batch, stream);
#define vary_block(_x, _y, _z) do { \
dim3 threads_per_block_for_coeff(layer->net.convolutional.count / _y, layer->input.matrix.channels / _x); \
assert(threads_per_block_for_coeff.x * threads_per_block_for_coeff.y <= 1024); \
dim3 num_blocks_for_coeff(layer->net.convolutional.cols, out_rows, batch_group_count); \
int shared_memory_size = sizeof(float) * BATCH_PER_BLOCK * (layer->net.convolutional.rows * layer->input.matrix.channels + layer->net.convolutional.count); \
cudaFuncSetCacheConfig(_cwc_kern_convolutional_backward_propagate_coefficient_rows<_x, _y, _z, BATCH_PER_BLOCK>, cudaFuncCachePreferShared); \
_cwc_kern_convolutional_backward_propagate_coefficient_rows \
<_x, _y, _z, BATCH_PER_BLOCK> \
<<<num_blocks_for_coeff, threads_per_block_for_coeff, shared_memory_size, stream>>> \
(layer->net.convolutional.strides, layer->net.convolutional.border, batch, \
chm, layer->input.matrix.rows, layer->input.matrix.cols, layer->input.matrix.channels, \
cha, out_rows, out_cols, \
cbw, layer->net.convolutional.rows, layer->net.convolutional.cols, layer->net.convolutional.count); \
} while (0)
// special casing for image
cwc_vary_4_a(x, 1, 2, 3, 4, cwc_vary_4_b, y, 1, 2, 3, 4, cwc_vary_5_c, layer->net.convolutional.rows, 3, 5, 7, 9, 11, vary_block);
#undef vary_block
cudaError_t error = cudaGetLastError();
if (cudaErrorInvalidConfiguration == error)
return -1;
assert(error == cudaSuccess);
return 0;
}
static void _cwc_convnet_convolutional_backward_propagate_coefficient_rows(ccv_convnet_layer_t* layer, int batch, float* a, float* n, float* m, float* b, ccv_convnet_layer_t* configuration, float* scratch, float* unit, const cudaStream_t& stream, const cublasHandle_t& handle)
{
static int vary_x[] = { 1, 2, 3, 4 };
static int vary_y[] = { 1, 2, 3, 4 };
static int vary_z[] = { 1 };
// benchmarking requires that this step has no side effects
CWC_IMPLEMENT_VARY_STUB(EXTRA(layer)->vary.convolutional.backward.coefficient, vary_x, vary_y, vary_z, _cwc_convnet_convolutional_backward_propagate_coefficient_rows_vary, layer, batch, a, n, m, b, configuration, scratch, unit, stream, handle);
int out_rows, out_cols, out_partition;
ccv_convnet_make_output(layer, layer->input.matrix.rows, layer->input.matrix.cols, &out_rows, &out_cols, &out_partition);
float* cbw = scratch + layer->input.matrix.rows * layer->input.matrix.cols * layer->input.matrix.channels * batch + out_rows * out_cols * layer->net.convolutional.count * batch;
int count = layer->net.convolutional.rows * layer->net.convolutional.cols * layer->net.convolutional.count * layer->input.matrix.channels;
const int batch_group_count = batch / BATCH_PER_BLOCK;
// this one has a side effect since it accumulates into configuration->w
cublasSgemv(handle, CUBLAS_OP_N, count, out_rows * batch_group_count, &one, cbw, count, unit, 1, &one, configuration->w, 1);
}
static int _cwc_convnet_convolutional_backward_propagate_coefficient_default_vary(ccv_convnet_layer_t* layer, int batch, float* a, float* n, float* m, float* b, ccv_convnet_layer_t* configuration, float* scratch, float* unit, const cudaStream_t& stream, const cublasHandle_t& handle,
int x, int y, int z)
{
int out_rows, out_cols, out_partition;
ccv_convnet_make_output(layer, layer->input.matrix.rows, layer->input.matrix.cols, &out_rows, &out_cols, &out_partition);
if (!(layer->net.convolutional.count % (y * out_partition) == 0 && z % x == 0 && layer->net.convolutional.channels % (z * out_partition) == 0 &&
layer->net.convolutional.count / (y * out_partition) * z / x <= 1024 && /* thread per block constraint */
layer->net.convolutional.count / (y * out_partition) * z / x >= z && layer->net.convolutional.count / (y * out_partition) * z / x >= layer->net.convolutional.count / out_partition && /* shared loading constraint */
sizeof(float) * (z + layer->net.convolutional.count / out_partition) <= 32 * 1024 /* shared memory size constraint */))
return -1;
float* chm = scratch;
float* cha = scratch + layer->input.matrix.rows * layer->input.matrix.cols * layer->input.matrix.channels * batch;
float* cbw = scratch + layer->input.matrix.rows * layer->input.matrix.cols * layer->input.matrix.channels * batch + out_rows * out_cols * layer->net.convolutional.count * batch;
const int batch_group_count = batch / BATCH_PER_BLOCK;
assert((layer->input.matrix.channels / out_partition) % THREAD_PER_BLOCK == 0);
assert((layer->net.convolutional.count / out_partition) % THREAD_PER_BLOCK == 0);
assert(batch % THREAD_PER_BLOCK == 0);
_cwc_kern_reorder_matrix_major
<THREAD_PER_BLOCK>
<<<dim3(layer->input.matrix.rows * layer->input.matrix.cols, (layer->input.matrix.channels / out_partition / THREAD_PER_BLOCK) * (batch / THREAD_PER_BLOCK), out_partition), THREAD_PER_BLOCK, sizeof(float) * THREAD_PER_BLOCK * THREAD_PER_BLOCK, stream>>>
(m, chm, layer->input.matrix.rows * layer->input.matrix.cols, layer->input.matrix.channels / out_partition, out_partition, batch);
_cwc_kern_reorder_matrix_major
<THREAD_PER_BLOCK>
<<<dim3(out_rows * out_cols, (layer->net.convolutional.count / out_partition / THREAD_PER_BLOCK) * (batch / THREAD_PER_BLOCK), out_partition), THREAD_PER_BLOCK, sizeof(float) * THREAD_PER_BLOCK * THREAD_PER_BLOCK, stream>>>
(a, cha, out_rows * out_cols, layer->net.convolutional.count / out_partition, out_partition, batch);
#define vary_block(_x, _y, _z) do { \
dim3 threads_per_block_for_coeff(layer->net.convolutional.count / (_y * out_partition), _z / _x); \
assert(threads_per_block_for_coeff.x * threads_per_block_for_coeff.y <= 1024); \
dim3 num_blocks_for_coeff(layer->net.convolutional.cols, layer->net.convolutional.rows, layer->net.convolutional.channels / _z * batch_group_count); \
int shared_memory_size = sizeof(float) * (_z + layer->net.convolutional.count / out_partition); \
_cwc_kern_convolutional_backward_propagate_coefficient_default \
<_x, _y, _z, BATCH_PER_BLOCK> \
<<<num_blocks_for_coeff, threads_per_block_for_coeff, shared_memory_size, stream>>> \
(layer->net.convolutional.strides, layer->net.convolutional.border, batch, batch_group_count, \
chm, layer->input.matrix.rows, layer->input.matrix.cols, layer->input.matrix.channels / out_partition, out_partition, \
cha, out_rows, out_cols, \
cbw, layer->net.convolutional.rows, layer->net.convolutional.cols, layer->net.convolutional.count / out_partition); \
} while (0)
cwc_vary_6_a(x, 1, 2, 3, 4, 6, 8, cwc_vary_6_b, y, 1, 2, 3, 4, 6, 8, cwc_vary_4_c, z, 16, 24, 32, 36, vary_block);
#undef vary_block
cudaError_t error = cudaGetLastError();
if (cudaErrorInvalidConfiguration == error)
return -1;
assert(error == cudaSuccess);
return 0;
}
static void _cwc_convnet_convolutional_backward_propagate_coefficient_default(ccv_convnet_layer_t* layer, int batch, float* a, float* n, float* m, float* b, ccv_convnet_layer_t* configuration, float* scratch, float* unit, const cudaStream_t& stream, const cublasHandle_t& handle)
{
static int vary_x[] = { 1, 2, 3, 4, 6, 8 };
static int vary_y[] = { 1, 2, 3, 4, 6, 8 };
static int vary_z[] = { 16, 24, 32, 36 };
// benchmarking requires that this step has no side effects
CWC_IMPLEMENT_VARY_STUB(EXTRA(layer)->vary.convolutional.backward.coefficient, vary_x, vary_y, vary_z, _cwc_convnet_convolutional_backward_propagate_coefficient_default_vary, layer, batch, a, n, m, b, configuration, scratch, unit, stream, handle);
int out_rows, out_cols, out_partition;
ccv_convnet_make_output(layer, layer->input.matrix.rows, layer->input.matrix.cols, &out_rows, &out_cols, &out_partition);
float* cbw = scratch + layer->input.matrix.rows * layer->input.matrix.cols * layer->input.matrix.channels * batch + out_rows * out_cols * layer->net.convolutional.count * batch;
int count = layer->net.convolutional.rows * layer->net.convolutional.cols * layer->net.convolutional.count * layer->input.matrix.channels / out_partition;
const int batch_group_count = batch / BATCH_PER_BLOCK;
// this one has a side effect since it accumulates into configuration->w
cublasSgemv(handle, CUBLAS_OP_N, count, batch_group_count, &one, cbw, count, unit, 1, &one, configuration->w, 1);
}
static int _cwc_convnet_convolutional_backward_propagate_error_vary(ccv_convnet_layer_t* layer, int batch, float* a, float* n, float* m, float* b, ccv_convnet_layer_t* configuration, float* scratch, float* unit, const cudaStream_t& stream, const cublasHandle_t& handle,
int x, int y, int z)
{
int out_rows, out_cols, out_partition;
ccv_convnet_make_output(layer, layer->input.matrix.rows, layer->input.matrix.cols, &out_rows, &out_cols, &out_partition);
if (!(batch % x == 0 && z % y == 0 &&
layer->input.matrix.channels % (z * out_partition) == 0 &&
batch / x * z / y <= 1024 && /* thread per block constraint */
batch / x * z / y >= batch && batch / x * z / y >= z && /* shared memory loading constraint */
sizeof(float) * (batch + z) <= 48 * 1024 /* shared memory size constraint */))
return -1;
float* chw = scratch;
_cwc_kern_reorder_matrix_major_parted
<<<dim3(layer->net.convolutional.rows * layer->net.convolutional.cols, layer->input.matrix.channels / out_partition), dim3(layer->net.convolutional.count / out_partition, out_partition), 0, stream>>>
(layer->w, chw, layer->net.convolutional.rows * layer->net.convolutional.cols, layer->input.matrix.channels, layer->net.convolutional.count, layer->input.matrix.channels / out_partition, layer->net.convolutional.count / out_partition, out_partition);
#define vary_block(_x, _y, _z, _s) do { \
dim3 threads_per_block(batch / _x, _z / _y); \
assert(threads_per_block.x * threads_per_block.y <= 1024); \
dim3 num_blocks(layer->input.matrix.cols * layer->input.matrix.channels / (_z * out_partition), layer->input.matrix.rows, out_partition); \
int shared_memory_size = sizeof(float) * (batch + _z); \
cudaFuncSetCacheConfig(_cwc_kern_convolutional_backward_propagate_error<_x, _y, _z, _s>, cudaFuncCachePreferShared); \
_cwc_kern_convolutional_backward_propagate_error \
<_x, _y, _z, _s> \
<<<num_blocks, threads_per_block, shared_memory_size, stream>>> \
(layer->net.convolutional.border, batch, \
b, layer->input.matrix.rows, layer->input.matrix.cols, layer->input.matrix.channels, \
a, out_rows, out_cols, \
chw, layer->net.convolutional.rows, layer->net.convolutional.cols, layer->net.convolutional.count / out_partition, out_partition); \
} while (0)
cwc_vary_4_a(x, 1, 2, 4, 8, cwc_vary_5_b, y, 1, 2, 4, 6, 8, cwc_vary_6_c, z, 16, 24, 32, 36, 64, 72, cwc_vary_4_d, layer->net.convolutional.strides, 1, 2, 3, 4, vary_block);
#undef vary_block
cudaError_t error = cudaGetLastError();
if (cudaErrorInvalidConfiguration == error)
return -1;
assert(error == cudaSuccess);
return 0;
}
static void _cwc_convnet_convolutional_backward_propagate_error(ccv_convnet_layer_t* layer, int batch, float* a, float* n, float* m, float* b, ccv_convnet_layer_t* configuration, float* scratch, float* unit, const cudaStream_t& stream, const cublasHandle_t& handle)
{
static int vary_x[] = { 1, 2, 4, 8 };
static int vary_y[] = { 1, 2, 4, 6, 8 };
static int vary_z[] = { 16, 24, 32, 36, 64, 72 };
CWC_IMPLEMENT_VARY_STUB(EXTRA(layer)->vary.convolutional.backward.gradient, vary_x, vary_y, vary_z, _cwc_convnet_convolutional_backward_propagate_error_vary, layer, batch, a, n, m, b, configuration, scratch, unit, stream, handle);
}
void cwc_convnet_convolutional_backward_propagate(ccv_convnet_layer_t* layer, int batch, float* a, float* n, float* m, float* b, ccv_convnet_layer_t* configuration, float* scratch, float* unit, const cudaStream_t& stream, const cublasHandle_t& handle)
{
assert(layer->net.convolutional.count % 4 == 0);
assert(batch % BATCH_PER_BLOCK == 0);
int out_rows, out_cols, out_partition;
ccv_convnet_make_output(layer, layer->input.matrix.rows, layer->input.matrix.cols, &out_rows, &out_cols, &out_partition);
// it turns out that applying relu first saves us a lot of computation because there is no need to load both out and out_grad any more
cwc_kern_relu_backward_propagate
<<<dim3(out_cols, out_rows, layer->net.convolutional.count), batch, 0, stream>>>
(batch, n, a, out_rows, out_cols, layer->net.convolutional.count);
assert(cudaGetLastError() == cudaSuccess);
if (cwc_convnet_layer_use_rows(layer))
_cwc_convnet_convolutional_backward_propagate_coefficient_rows(layer, batch, a, n, m, b, configuration, scratch, unit, stream, handle);
else
_cwc_convnet_convolutional_backward_propagate_coefficient_default(layer, batch, a, n, m, b, configuration, scratch, unit, stream, handle);
// compute the bias directly using gemv routine
cublasSgemv(handle, CUBLAS_OP_T, out_rows * out_cols * batch, layer->net.convolutional.count, &one, a, out_rows * out_cols * batch, unit, 1, &one, configuration->bias, 1);
assert(cudaGetLastError() == cudaSuccess);
if (b)
_cwc_convnet_convolutional_backward_propagate_error(layer, batch, a, n, m, b, configuration, scratch, unit, stream, handle);
}
|
0d4444a8abfee7d98266b8fb8814cc86b48325fc.hip | // !!! This is a file automatically generated by hipify!!!
#pragma once
#include <iostream>
#include <fstream>
#include <string>
#include <iomanip>
#include <ctime>
#include "FitnessFunction.h"
int main()
{
int problemSum = 1;// number of problems
int testSum = 1;// number of test runs per problem
for (int curProblem = 0; curProblem < problemSum; curProblem++)// loop over the problems
{
cout << "Running problem " + to_string(curProblem + 1) << endl;
#pragma region Set PSO parameters
ifstream inputFile;
inputFile.open("../../InputParas/InputPara" + to_string(curProblem + 1) + ".txt");
//CPU
ProblemParas proParas(inputFile); // initialize all device-related parameters
cout << "File read successfully" << endl;
int ThreadsPerBlock = 100; // 100 threads per block
int BlockSum = 1; // number of blocks
int dim = proParas.DeviceSum * 3; // total dimension = device count * 3 (x, y, orientation)
//CPU
PSOPara psopara(dim);
psopara.mesh_div_count = 4; // number of mesh divisions
psopara.problemParas = proParas; // parameters of the layout problem
psopara.particle_num_ = ThreadsPerBlock * BlockSum; // number of particles
psopara.max_iter_num_ = 400; // maximum number of iterations
psopara.fitness_count_ = 2; // number of fitness objectives
psopara.archive_max_count = 50; // maximum size of the archive array
psopara.SetDt(1.0); // time step
psopara.SetWstart(0.9); // starting inertia weight
psopara.SetWend(0.4); // ending inertia weight
psopara.SetC1(1.49445); // acceleration factor 1
psopara.SetC2(1.49445); // acceleration factor 2
psopara.SetLowBound(0, 0, DeviceDirect::Default); // lower bound of the position search range
psopara.blockSum = BlockSum;
psopara.threadsPerBlock = ThreadsPerBlock;
// don't let the device orientation reach the maximum value; it may only go up to 3.x
psopara.SetUpBound(proParas.workShopLength, proParas.workShopWidth, DeviceDirect::Rotate270 + 1);// upper bound of the position search range
#pragma endregion
#pragma region Run the PSO algorithm and output the results
//GPU
PSOOptimizer psooptimizer(&psopara, proParas);// PSO algorithm object
string curProblemFolderName = "Problem" + to_string(curProblem + 1);
for (int curTest = 0; curTest < testSum; curTest++) {// run each problem multiple times
clock_t startTime, endTime;// used to record the run time
startTime = clock();// start timing
#pragma region Initialization
psooptimizer.InitialAllParticles();// initialize all particles (CPU)
psooptimizer.InitialArchiveList();// initialize the Archive list (CPU)
psooptimizer.InitGbest();// initialize the global best (CPU)
#pragma endregion
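// Note (inferred from the loop below): every iteration performs the usual MOPSO steps —
// move all particles, refresh each particle's pbest, merge non-dominated solutions into the
// external archive, then re-select gbest from that archive (mesh_div_count suggests a
// grid-density based selection).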
#pragma region Iteratively update the particles & store the fitness values of every iteration
// objective 1 values are stored in archiveList1, objective 2 values in archiveList2
// the n-th test run is written to its own files
// inside a folder named Testn
ofstream OutFile;
ofstream OutFile1;
string curTestFolderName = "Test" + to_string(curTest + 1);
OutFile.open("../../Results/" + curProblemFolderName + "/" + curTestFolderName + "/archiveList1.txt");
OutFile1.open("../../Results/" + curProblemFolderName + "/" + curTestFolderName + "/archiveList2.txt");
for (int i = 0; i < psooptimizer.max_iter_num_; i++)// main iteration loop; the parallel work starts here
{
cout << (i + 1) << endl;
psooptimizer.UpdateAllParticles();// update the position and velocity of every particle
psooptimizer.UpdatePbest();// update pbest
psooptimizer.UpdateArchiveList();// update the external archive set
psooptimizer.UpdateGbest();// update gbest
// store the Archive set of every iteration
double minFitness1, minFitness2;
minFitness1 = minFitness2 = INT_MAX;
cout << minFitness1 << endl;
// archiveList is on the CPU side
for (auto it = psooptimizer.archive_list.begin(); it != psooptimizer.archive_list.end(); it++)
{
minFitness1 = min(minFitness1, it->fitness_[0]);
}
string f1line = to_string(minFitness1) + "\n";
OutFile << f1line;
cout << f1line << endl;
for (auto it = psooptimizer.archive_list.begin(); it != psooptimizer.archive_list.end(); it++)
{
minFitness2 = min(minFitness2, it->fitness_[1]);
}
string f2line = to_string(minFitness2) + "\n";
OutFile1 << f2line;
cout << f2line << endl;
}
OutFile.close();
OutFile1.close();
#pragma endregion
endTime = clock();
cout << "" << psopara.max_iter_num_ << ":" << static_cast<double>(endTime - startTime) / CLOCKS_PER_SEC << "s" << endl;
#pragma region Write the final result (device layout && in/out points && conveyor info)
OutFile.open("../../Results/" + curProblemFolderName + "/" + curTestFolderName + "/FinalResult.txt");
#pragma region Pick the best archive entry and write the device positions
int resultIndex = 0;
int minHandleCost = INT_MAX;
int minConveyValue = INT_MAX;
// pick the archive entry with the smallest handling cost
for (int i = 0; i < psooptimizer.archive_list.size(); i++)
{
if (psooptimizer.archive_list[i].fitness_[0] < minHandleCost)
{
minHandleCost = psooptimizer.archive_list[i].fitness_[0];
resultIndex = i;
}
}
// alternative: pick the entry with the smallest conveyor cost (kept disabled below)
//for (int i = 0; i < psooptimizer.archive_list.size(); i++)
//{
// if (psooptimizer.archive_list[i].fitness_[1] < minConveyValue)
// {
// minConveyValue = psooptimizer.archive_list[i].fitness_[0];
// resultIndex = i;
// }
//}
for (int i = 0; i < dim; i += 3)
{
OutFile << psooptimizer.archive_list[resultIndex].position_[i];
OutFile << ",";
OutFile << psooptimizer.archive_list[resultIndex].position_[i + 1];
OutFile << "\n";
}
#pragma endregion
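// Note (inferred): the remainder of this function copies the chosen layout back from the
// GPU (device sizes, in/out points, straight and curved conveyor segments) and appends it
// to FinalResult.txt as plain text.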
//GPU->CPU
// the device sizes live in GPU memory and are copied back below
#pragma region Write the device sizes
Vector2* deviceParaListSize_CPU = new Vector2[proParas.DeviceSum];
// copy the size list back to the CPU
for (int i = 0; i < proParas.DeviceSum; i++)
{
hipMemcpy(deviceParaListSize_CPU + i, &psooptimizer.problemParas.size[i], sizeof(Vector2), hipMemcpyDeviceToHost);
}
for (int i = 2; i < dim; i += 3)
{
DeviceDirect direct = (DeviceDirect)(int)psooptimizer.archive_list[resultIndex].position_[i];
string line = "";
if (direct == DeviceDirect::Rotate90 || direct == DeviceDirect::Rotate270)
{
// the sizes copied from the GPU are unrotated, so swap width and height here
line = to_string(deviceParaListSize_CPU[i / 3].y) + "," +
to_string(deviceParaListSize_CPU[i / 3].x);
}
else {
line = to_string(deviceParaListSize_CPU[i / 3].x) + "," +
to_string(deviceParaListSize_CPU[i / 3].y);
}
OutFile << line + "\n";
}
#pragma endregion
int fitnessIndex = 0;
#pragma region Write the in/out points
// copy the inoutPoints of the best path back from the GPU
//int ioPointsSize = psooptimizer.bestPathInfoList[fitnessIndex].inoutPSize;
int ioPointsSize = psooptimizer.inoutPSize;// number of in/out points
InoutPoint* ioPoints = new InoutPoint[ioPointsSize];
hipMemcpy(ioPoints, psooptimizer.curBestPath_InoutPoints, sizeof(InoutPoint) * ioPointsSize, hipMemcpyDeviceToHost);
OutFile << to_string(ioPointsSize) + "\n";// write the count first
for (int i = 0; i < ioPointsSize; i++)
{
if (ioPoints[i].pointDirect == PointDirect::Up || ioPoints[i].pointDirect == PointDirect::Down)
{
OutFile << "Vertical ";
}
else
{
OutFile << "Horizon ";
}
OutFile << ioPoints[i].pointAxis.x;
OutFile << " ";
OutFile << ioPoints[i].pointAxis.y;
OutFile << "\n";
}
#pragma endregion
#pragma region Write the path links (currently disabled)
////
//string line = "";
//for (int i = 0; i < proParas.CargoTypeNum; i++)
//{
// line += to_string(proParas.cargoTypeList[i].deviceSum - 1);
// if (i != proParas.CargoTypeNum - 1)
// {
// line += " ";
// }
//}
//OutFile << line << "\n";
//vector<PointLink> p = psooptimizer.archive_list[resultIndex].pointLinks;
//for (int i = 0; i < p.size(); i++)
//{
// string s1, s2;
// DevicePara device1, device2;
// //s1 = to_string(p[i].device1Index) + " " + to_string(p[i].device2Index);
// OutFile << to_string(p[i].device1Index) + " " + to_string(p[i].device2Index) + " ";
// //s2
// for (int j = 0; j < p[i].points.size(); j++)
// {
// OutFile /*<< fixed << setprecision(1)*/ << p[i].points[j].x;
// OutFile << ",";
// OutFile /*<< fixed << setprecision(1)*/ << p[i].points[j].y;
// //s2 += to_string(p[i].points[j].x) + "," + to_string(p[i].points[j].y);
// if (j != p[i].points.size() - 1)
// {
// //s2 += "|";
// OutFile << "|";
// }
// }
// OutFile << "\n";
// //string line = s1 + " " + s2 + "\n";
// //OutFile << line;
//}
#pragma endregion
#pragma region Write the conveyor info
//GPU->CPU
int strInfoListSum = psooptimizer.curBestPath_StrConveyorListSum[0];
StraightConveyorInfo* strInfoList = new StraightConveyorInfo[strInfoListSum];
hipMemcpy(strInfoList, psooptimizer.curBestPath_StrConveyorList, sizeof(StraightConveyorInfo) * strInfoListSum, hipMemcpyDeviceToHost);
int curveInfoListSum = psooptimizer.curBestPath_CurveConveyorListSum[0];
Vector2Int* curveInfoList = new Vector2Int[curveInfoListSum];
hipMemcpy(curveInfoList, psooptimizer.curBestPath_CurveConveyorList, sizeof(Vector2Int)* curveInfoListSum, hipMemcpyDeviceToHost);
OutFile << strInfoListSum << "\n";
for (int i = 0; i < strInfoListSum; i++)
{
OutFile << to_string(strInfoList[i].startPos.x) << "," << to_string(strInfoList[i].startPos.y)
<< ";" << to_string(strInfoList[i].endPos.x) << "," << to_string(strInfoList[i].endPos.y)
<< ";" << to_string(strInfoList[i].startHnum) << ";" << to_string(strInfoList[i].startVnum)
<< ";" << to_string(strInfoList[i].endHnum) << ";" << to_string(strInfoList[i].endVnum)
<< "\n";
}
OutFile << curveInfoListSum << "\n";
for (int i = 0; i < curveInfoListSum; i++)
{
OutFile << to_string(curveInfoList[i].x) << "," << to_string(curveInfoList[i].y) << "\n";
}
#pragma endregion
OutFile.close();
#pragma endregion
}
//free memory
#pragma endregion
}
return 0;
}
| 0d4444a8abfee7d98266b8fb8814cc86b48325fc.cu | #pragma once
#include <iostream>
#include <fstream>
#include <string>
#include <iomanip>
#include <ctime>
#include "FitnessFunction.h"
int main()
{
int problemSum = 1;//number of problems
int testSum = 1;//number of runs per problem
for (int curProblem = 0; curProblem < problemSum; curProblem++)//run every problem
{
cout << "Running problem " + to_string(curProblem + 1) << endl;
#pragma region Set the PSO parameters
ifstream inputFile;
inputFile.open("../../InputParas/InputPara" + to_string(curProblem + 1) + ".txt");
//CPU
ProblemParas proParas(inputFile); // Initialize all device-related parameters
cout << "Input file read successfully" << endl;
int ThreadsPerBlock = 100; // 100 threads per block
int BlockSum = 1; // number of blocks
int dim = proParas.DeviceSum * 3; // total dimension = device count * 3 (x, y, orientation)
//CPU
PSOPara psopara(dim);
psopara.mesh_div_count = 4; // number of mesh divisions
psopara.problemParas = proParas; // parameters of the layout problem
psopara.particle_num_ = ThreadsPerBlock * BlockSum; // number of particles
psopara.max_iter_num_ = 400; // maximum number of iterations
psopara.fitness_count_ = 2; // number of fitness objectives
psopara.archive_max_count = 50; // maximum size of the archive array
psopara.SetDt(1.0); // time step
psopara.SetWstart(0.9); // initial weight
psopara.SetWend(0.4); // final weight
psopara.SetC1(1.49445); // acceleration factor 1
psopara.SetC2(1.49445); // acceleration factor 2
psopara.SetLowBound(0, 0, DeviceDirect::Default); // lower bound of the position search range
psopara.blockSum = BlockSum;
psopara.threadsPerBlock = ThreadsPerBlock;
//keep the device orientation below the maximum enum value; it may only reach 3.x
psopara.SetUpBound(proParas.workShopLength, proParas.workShopWidth, DeviceDirect::Rotate270 + 1);// upper bound of the position search range
#pragma endregion
#pragma region Run the PSO algorithm and output the results
//GPU
PSOOptimizer psooptimizer(&psopara, proParas);//PSO algorithm object
string curProblemFolderName = "Problem" + to_string(curProblem + 1);
for (int curTest = 0; curTest < testSum; curTest++) {//run each problem several times
clock_t startTime, endTime;//used to time the run
startTime = clock();//start timing
#pragma region Initialization
psooptimizer.InitialAllParticles();//initialize all particles (CPU)
psooptimizer.InitialArchiveList();//initialize the Archive list (CPU)
psooptimizer.InitGbest();//initialize the global best (CPU)
#pragma endregion
#pragma region Iteratively update particles & record the fitness of every iteration
//objective-1 values go to archiveList1, objective-2 values go to archiveList2
//the results of the n-th run are written to files
//inside a folder named Testn
ofstream OutFile;
ofstream OutFile1;
string curTestFolderName = "Test" + to_string(curTest + 1);
OutFile.open("../../Results/" + curProblemFolderName + "/" + curTestFolderName + "/archiveList1.txt");
OutFile1.open("../../Results/" + curProblemFolderName + "/" + curTestFolderName + "/archiveList2.txt");
for (int i = 0; i < psooptimizer.max_iter_num_; i++)//start the parallel part of the run
{
cout << (i + 1) << endl;
psooptimizer.UpdateAllParticles();//update the position and velocity of every particle
psooptimizer.UpdatePbest();//update pbest
psooptimizer.UpdateArchiveList();//update the external archive
psooptimizer.UpdateGbest();//update gbest
//store the archive of this iteration
double minFitness1, minFitness2;
minFitness1 = minFitness2 = INT_MAX;
cout << minFitness1 << endl;
//the archive list is kept on the CPU side
for (auto it = psooptimizer.archive_list.begin(); it != psooptimizer.archive_list.end(); it++)
{
minFitness1 = min(minFitness1, it->fitness_[0]);
}
string f1line = to_string(minFitness1) + "\n";
OutFile << f1line;
cout << f1line << endl;
for (auto it = psooptimizer.archive_list.begin(); it != psooptimizer.archive_list.end(); it++)
{
minFitness2 = min(minFitness2, it->fitness_[1]);
}
string f2line = to_string(minFitness2) + "\n";
OutFile1 << f2line;
cout << f2line << endl;
}
OutFile.close();
OutFile1.close();
#pragma endregion
endTime = clock();
cout << "迭代" << psopara.max_iter_num_ << "次的最终用时:" << static_cast<double>(endTime - startTime) / CLOCKS_PER_SEC << "s" << endl;
#pragma region 保存设备尺寸&最终布局结果&连线点的坐标
OutFile.open("../../Results/" + curProblemFolderName + "/" + curTestFolderName + "/FinalResult.txt");
#pragma region 记录最终布局结果
int resultIndex = 0;
int minHandleCost = INT_MAX;
int minConveyValue = INT_MAX;
//prefer the solution with the lowest material-handling cost
for (int i = 0; i < psooptimizer.archive_list.size(); i++)
{
if (psooptimizer.archive_list[i].fitness_[0] < minHandleCost)
{
minHandleCost = psooptimizer.archive_list[i].fitness_[0];
resultIndex = i;
}
}
//alternative: prefer the solution with the lowest conveyor cost
//for (int i = 0; i < psooptimizer.archive_list.size(); i++)
//{
// if (psooptimizer.archive_list[i].fitness_[1] < minConveyValue)
// {
// minConveyValue = psooptimizer.archive_list[i].fitness_[0];
// resultIndex = i;
// }
//}
for (int i = 0; i < dim; i += 3)
{
OutFile << psooptimizer.archive_list[resultIndex].position_[i];
OutFile << ",";
OutFile << psooptimizer.archive_list[resultIndex].position_[i + 1];
OutFile << "\n";
}
#pragma endregion
//some of the following parameters have to be copied from GPU->CPU
//only copy what is needed
#pragma region Record device sizes
Vector2* deviceParaListSize_CPU = new Vector2[proParas.DeviceSum];
//copy the data to the CPU in a loop, one size entry at a time
for (int i = 0; i < proParas.DeviceSum; i++)
{
cudaMemcpy(deviceParaListSize_CPU + i, &psooptimizer.problemParas.size[i], sizeof(Vector2), cudaMemcpyDeviceToHost);
}
for (int i = 2; i < dim; i += 3)
{
DeviceDirect direct = (DeviceDirect)(int)psooptimizer.archive_list[resultIndex].position_[i];
string line = "";
if (direct == DeviceDirect::Rotate90 || direct == DeviceDirect::Rotate270)
{
//this data lives on the GPU
line = to_string(deviceParaListSize_CPU[i / 3].y) + "," +
to_string(deviceParaListSize_CPU[i / 3].x);
}
else {
line = to_string(deviceParaListSize_CPU[i / 3].x) + "," +
to_string(deviceParaListSize_CPU[i / 3].y);
}
OutFile << line + "\n";
}
#pragma endregion
int fitnessIndex = 0;
#pragma region Record in/out point coordinates (after rotation, without the device coordinates)
//copy the inoutPoints
//int ioPointsSize = psooptimizer.bestPathInfoList[fitnessIndex].inoutPSize;
int ioPointsSize = psooptimizer.inoutPSize;//
InoutPoint* ioPoints = new InoutPoint[ioPointsSize];
cudaMemcpy(ioPoints, psooptimizer.curBestPath_InoutPoints, sizeof(InoutPoint) * ioPointsSize, cudaMemcpyDeviceToHost);
OutFile << to_string(ioPointsSize) + "\n";//number of in/out points
for (int i = 0; i < ioPointsSize; i++)
{
if (ioPoints[i].pointDirect == PointDirect::Up || ioPoints[i].pointDirect == PointDirect::Down)
{
OutFile << "Vertical ";
}
else
{
OutFile << "Horizon ";
}
OutFile << ioPoints[i].pointAxis.x;
OutFile << " ";
OutFile << ioPoints[i].pointAxis.y;
OutFile << "\n";
}
#pragma endregion
#pragma region Record in/out paths
////first store the number of paths for each cargo type
//string line = "";
//for (int i = 0; i < proParas.CargoTypeNum; i++)
//{
// line += to_string(proParas.cargoTypeList[i].deviceSum - 1);
// if (i != proParas.CargoTypeNum - 1)
// {
// line += " ";
// }
//}
//OutFile << line << "\n";
//vector<PointLink> p = psooptimizer.archive_list[resultIndex].pointLinks;
//for (int i = 0; i < p.size(); i++)
//{
// string s1, s2;
// DevicePara device1, device2;
// //s1 = to_string(p[i].device1Index) + " " + to_string(p[i].device2Index);
// OutFile << to_string(p[i].device1Index) + " " + to_string(p[i].device2Index) + " ";
// //compute s2
// for (int j = 0; j < p[i].points.size(); j++)
// {
// OutFile /*<< fixed << setprecision(1)*/ << p[i].points[j].x;
// OutFile << ",";
// OutFile /*<< fixed << setprecision(1)*/ << p[i].points[j].y;
// //s2 += to_string(p[i].points[j].x) + "," + to_string(p[i].points[j].y);
// if (j != p[i].points.size() - 1)
// {
// //s2 += "|";
// OutFile << "|";
// }
// }
// OutFile << "\n";
// //string line = s1 + " " + s2 + "\n";
// //OutFile << line;
//}
#pragma endregion
#pragma region Record straight-conveyor and curved-conveyor parameters
//GPU->CPU
int strInfoListSum = psooptimizer.curBestPath_StrConveyorListSum[0];
StraightConveyorInfo* strInfoList = new StraightConveyorInfo[strInfoListSum];
cudaMemcpy(strInfoList, psooptimizer.curBestPath_StrConveyorList, sizeof(StraightConveyorInfo) * strInfoListSum, cudaMemcpyDeviceToHost);
int curveInfoListSum = psooptimizer.curBestPath_CurveConveyorListSum[0];
Vector2Int* curveInfoList = new Vector2Int[curveInfoListSum];
cudaMemcpy(curveInfoList, psooptimizer.curBestPath_CurveConveyorList, sizeof(Vector2Int)* curveInfoListSum, cudaMemcpyDeviceToHost);
OutFile << strInfoListSum << "\n";
for (int i = 0; i < strInfoListSum; i++)
{
OutFile << to_string(strInfoList[i].startPos.x) << "," << to_string(strInfoList[i].startPos.y)
<< ";" << to_string(strInfoList[i].endPos.x) << "," << to_string(strInfoList[i].endPos.y)
<< ";" << to_string(strInfoList[i].startHnum) << ";" << to_string(strInfoList[i].startVnum)
<< ";" << to_string(strInfoList[i].endHnum) << ";" << to_string(strInfoList[i].endVnum)
<< "\n";
}
OutFile << curveInfoListSum << "\n";
for (int i = 0; i < curveInfoListSum; i++)
{
OutFile << to_string(curveInfoList[i].x) << "," << to_string(curveInfoList[i].y) << "\n";
}
#pragma endregion
OutFile.close();
#pragma endregion
}
//free memory
#pragma endregion
}
return 0;
}
|
94525b9db171d14c2d5bb7e878b63400893f38aa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
typedef unsigned int uint32;
hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
__device__ uint32 Cloamp_10bit(float data)
{
return ((uint32)data >> 2);
}
__global__ void addKernel(int *c, const int *a, const int *b)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
}
//int main()
//{
// const int arraySize = 5;
// const int a[arraySize] = { 1, 2, 3, 4, 5 };
// const int b[arraySize] = { 10, 20, 30, 40, 50 };
// int c[arraySize] = { 0 };
//
// // Add vectors in parallel.
// hipError_t cudaStatus = addWithCuda(c, a, b, arraySize);
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "addWithCuda failed!");
// return 1;
// }
//
// printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
// c[0], c[1], c[2], c[3], c[4]);
//
// // hipDeviceReset must be called before exiting in order for profiling and
// // tracing tools such as Nsight and Visual Profiler to show complete traces.
// cudaStatus = hipDeviceReset();
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipDeviceReset failed!");
// return 1;
// }
//
// return 0;
//}
// Helper function for using CUDA to add vectors in parallel.
hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = hipMalloc((void**)&dev_c, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
hipLaunchKernelGGL(( addKernel), dim3(1), dim3(size), 0, 0, dev_c, dev_a, dev_b);
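// Note: hipLaunchKernelGGL(kernel, gridDim, blockDim, sharedMemBytes, stream, args...)
// is HIP's portable launch macro; the call above is equivalent to the CUDA-style
// addKernel<<<1, size>>>(dev_c, dev_a, dev_b) launch in the original source below.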
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error:
hipFree(dev_c);
hipFree(dev_a);
hipFree(dev_b);
return cudaStatus;
}
typedef unsigned int uint32;
typedef int int32;
typedef unsigned char uint8;
struct uint24
{
uint8 r;
uint8 g;
uint8 b;
};
__global__ void cutPicture(uint32* dstImage, uint32* srcImage, int srcWidth, int srcHeight, int cx, int cy, int width)
{
int32 x, y;
int srcPitch = srcWidth * 3;
int dstPitch = width * 3;
uint24 *srcImageU8 = (uint24 *)srcImage;
uint24 *dstImageU8 = (uint24 *)dstImage;
x = blockIdx.x * (blockDim.x << 1) + (threadIdx.x << 1);
y = blockIdx.y * blockDim.y + threadIdx.y;
if ((x) >= width)
return; //x = width - 1;
if ((y) >= width)
return; // y = height - 1;
dstImageU8[y * width + x].r = srcImageU8[(y+cy) * srcWidth + x + cx].r;
dstImageU8[y * width + x].g = srcImageU8[(y+cy) * srcWidth + x + cx].g;
dstImageU8[y * width + x].b = srcImageU8[(y+cy) * srcWidth + x + cx].b;
dstImageU8[y * width + x + 1].r = srcImageU8[(y + cy) * srcWidth + x + cx + 1].r;
dstImageU8[y * width + x + 1].g = srcImageU8[(y + cy) * srcWidth + x + cx + 1].g;
dstImageU8[y * width + x + 1].b = srcImageU8[(y + cy) * srcWidth + x + cx + 1].b;
}
int main()
{
hipError_t cudaStatus;
int size = 0;
int x = 900;
int y = 400;
int width = 400;
int dstImageSize = width*width * 4;
FILE* pf = fopen("d:\\image.rgba", "rb");
unsigned char* pData = NULL;
if (pf)
{
fseek(pf, 0L, SEEK_END);
size = ftell(pf);
fseek(pf, 0L, SEEK_SET);
pData = new unsigned char[size];
fread(pData, size, 1, pf);
fclose(pf);
}
if (pData)
{
unsigned char* pSrcImage = NULL;
unsigned char* pDstImage = NULL;
unsigned char* pHImage = new unsigned char[dstImageSize];
cudaStatus = hipMalloc((void**)&pSrcImage, size);
cudaStatus = hipMalloc((void**)&pDstImage, dstImageSize);
cudaStatus = hipMemcpy(pSrcImage, pData, size, hipMemcpyHostToDevice);
dim3 block(32, 16, 1);
dim3 grid((width + (2 * block.x - 1)) / (2 * block.x), (width + (block.y - 1)) / block.y, 1);
cutPicture << <grid, block, 0 >> > ((uint32*)pDstImage, (uint32*)pSrcImage, 1920, 1080, x, y, width);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
}
cudaStatus = hipMemcpy(pHImage, pDstImage, dstImageSize, hipMemcpyDeviceToHost);
FILE* pf2 = fopen("d:\\cutimage.rgb", "wb");
if (pf2)
{
fwrite(pHImage, dstImageSize, 1, pf2);
fclose(pf2);
}
delete[] pHImage;
hipFree(pSrcImage);
hipFree(pDstImage);
delete[] pData;
}
return 0;
} | 94525b9db171d14c2d5bb7e878b63400893f38aa.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
typedef unsigned int uint32;
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
__device__ uint32 Cloamp_10bit(float data)
{
return ((uint32)data >> 2);
}
__global__ void addKernel(int *c, const int *a, const int *b)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
}
//int main()
//{
// const int arraySize = 5;
// const int a[arraySize] = { 1, 2, 3, 4, 5 };
// const int b[arraySize] = { 10, 20, 30, 40, 50 };
// int c[arraySize] = { 0 };
//
// // Add vectors in parallel.
// cudaError_t cudaStatus = addWithCuda(c, a, b, arraySize);
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "addWithCuda failed!");
// return 1;
// }
//
// printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
// c[0], c[1], c[2], c[3], c[4]);
//
// // cudaDeviceReset must be called before exiting in order for profiling and
// // tracing tools such as Nsight and Visual Profiler to show complete traces.
// cudaStatus = cudaDeviceReset();
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaDeviceReset failed!");
// return 1;
// }
//
// return 0;
//}
// Helper function for using CUDA to add vectors in parallel.
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
addKernel<<<1, size>>>(dev_c, dev_a, dev_b);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(dev_c);
cudaFree(dev_a);
cudaFree(dev_b);
return cudaStatus;
}
typedef unsigned int uint32;
typedef int int32;
typedef unsigned char uint8;
struct uint24
{
uint8 r;
uint8 g;
uint8 b;
};
__global__ void cutPicture(uint32* dstImage, uint32* srcImage, int srcWidth, int srcHeight, int cx, int cy, int width)
{
int32 x, y;
int srcPitch = srcWidth * 3;
int dstPitch = width * 3;
uint24 *srcImageU8 = (uint24 *)srcImage;
uint24 *dstImageU8 = (uint24 *)dstImage;
x = blockIdx.x * (blockDim.x << 1) + (threadIdx.x << 1);
y = blockIdx.y * blockDim.y + threadIdx.y;
if ((x) >= width)
return; //x = width - 1;
if ((y) >= width)
return; // y = height - 1;
dstImageU8[y * width + x].r = srcImageU8[(y+cy) * srcWidth + x + cx].r;
dstImageU8[y * width + x].g = srcImageU8[(y+cy) * srcWidth + x + cx].g;
dstImageU8[y * width + x].b = srcImageU8[(y+cy) * srcWidth + x + cx].b;
dstImageU8[y * width + x + 1].r = srcImageU8[(y + cy) * srcWidth + x + cx + 1].r;
dstImageU8[y * width + x + 1].g = srcImageU8[(y + cy) * srcWidth + x + cx + 1].g;
dstImageU8[y * width + x + 1].b = srcImageU8[(y + cy) * srcWidth + x + cx + 1].b;
}
int main()
{
cudaError_t cudaStatus;
int size = 0;
int x = 900;
int y = 400;
int width = 400;
int dstImageSize = width*width * 4;
FILE* pf = fopen("d:\\image.rgba", "rb");
unsigned char* pData = NULL;
if (pf)
{
fseek(pf, 0L, SEEK_END);
size = ftell(pf);
fseek(pf, 0L, SEEK_SET);
pData = new unsigned char[size];
fread(pData, size, 1, pf);
fclose(pf);
}
if (pData)
{
unsigned char* pSrcImage = NULL;
unsigned char* pDstImage = NULL;
unsigned char* pHImage = new unsigned char[dstImageSize];
cudaStatus = cudaMalloc((void**)&pSrcImage, size);
cudaStatus = cudaMalloc((void**)&pDstImage, dstImageSize);
cudaStatus = cudaMemcpy(pSrcImage, pData, size, cudaMemcpyHostToDevice);
dim3 block(32, 16, 1);
dim3 grid((width + (2 * block.x - 1)) / (2 * block.x), (width + (block.y - 1)) / block.y, 1);
cutPicture << <grid, block, 0 >> > ((uint32*)pDstImage, (uint32*)pSrcImage, 1920, 1080, x, y, width);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
}
cudaStatus = cudaMemcpy(pHImage, pDstImage, dstImageSize, cudaMemcpyDeviceToHost);
FILE* pf2 = fopen("d:\\cutimage.rgb", "wb");
if (pf2)
{
fwrite(pHImage, dstImageSize, 1, pf2);
fclose(pf2);
}
delete[] pHImage;
cudaFree(pSrcImage);
cudaFree(pDstImage);
delete[] pData;
}
return 0;
} |
9fbebd0f02302a5952dbdd297d31ff5239588e43.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define DEVICE_CODE
#define CPLUSPLUS
#include "MDSystem_interface.h"
#include "GromacsFileManager.h"
#include "FileManager.h"
#include "MDException.h"
#include "Reshuffle_interface.h"
MDSystem::MDSystem()
{
xdfile = NULL;
xdx = NULL;
xdtrrx = NULL;
xdtrrv = NULL;
xdtrrf = NULL;
tmpNAtomType = 0;
// setNULL (&hdata);
// setNULL (&ddata);
}
void MDSystem::
normalizeDeviceData (MDTimer * timer)
{
if (timer != NULL) timer->tic(mdTimeNormalizeSys);
IndexType nob = (ddata.numAtom + DefaultNThreadPerBlock - 1) / DefaultNThreadPerBlock;
dim3 atomGridDim = toGridDim (nob);
hipLaunchKernelGGL(( normalizeSystem)
, dim3(atomGridDim), dim3(DefaultNThreadPerBlock), 0, 0,
box,
ddata.numAtom,
ddata.coord,
ddata.coordNoix,
ddata.coordNoiy,
ddata.coordNoiz);
checkCUDAError ("NeighborList::rebuild, normalize System");
if (timer != NULL) timer->toc(mdTimeNormalizeSys);
}
void MDSystem::initConfig (const char * configfile,
// const char * mapfile,
const IndexType & maxNumAtom)
{
FILE * fpc = fopen (configfile, "r");
if (fpc == NULL) {
throw MDExcptCannotOpenFile ("MDSystem::initConfig:", configfile);
}
while (fgetc(fpc) != '\n');
IndexType numAtom, numMem;
if (fscanf (fpc, "%d", &(numAtom)) != 1){
throw MDExcptWrongFileFormat ("MDSystem::initConfig", configfile);
}
if (maxNumAtom != 0) {
numMem = maxNumAtom;
}
else {
numMem = numAtom;
}
mallocHostMDData (numAtom, numMem, &hdata);
IndexType * tmpatomIndex = (IndexType * )malloc(sizeof(IndexType) * numMem);
if (tmpatomIndex == NULL){
throw MDExcptFailedMallocOnHost ("MDSystem::initConfig", "tmpatomIndex",
sizeof(IndexType) * numMem);
}
ScalorType bx, by, bz;
#ifdef COORD_IN_ONE_VEC
ScalorType * tmpx, * tmpy, * tmpz;
tmpx = (ScalorType *)malloc (sizeof(ScalorType) * numMem);
tmpy = (ScalorType *)malloc (sizeof(ScalorType) * numMem);
tmpz = (ScalorType *)malloc (sizeof(ScalorType) * numMem);
#endif
GromacsFileManager::readGroFile (configfile,
hdata.resdIndex, hdata.resdName,
hdata.atomName, hdata.atomIndex,
#ifndef COORD_IN_ONE_VEC
hdata.coordx, hdata.coordy, hdata.coordz,
#else
tmpx, tmpy, tmpz,
#endif
hdata.velox, hdata.veloy, hdata.veloz,
&bx, &by, &bz) ;
#ifdef COORD_IN_ONE_VEC
for (IndexType i = 0; i < numAtom; ++i){
hdata.coord[i].x = tmpx[i];
hdata.coord[i].y = tmpy[i];
hdata.coord[i].z = tmpz[i];
}
free (tmpx);
free (tmpy);
free (tmpz);
#endif
freeAPointer ((void**)&tmpatomIndex);
RectangularBoxGeometry::setBoxSize (bx, by, bz, &box);
// tmpNAtomType = readAtomNameMapFile (mapfile, hdata.numAtom, hdata.atomName,
// hdata.type, hdata.mass, hdata.charge) ;
// initMass (&hdata);
printf ("# total %d atoms found, %d types are presented in mapping file\n",
hdata.numAtom, tmpNAtomType);
for (IndexType i = 0; i < hdata.numAtom; ++i){
hdata.forcx[i] = 0.f;
hdata.forcy[i] = 0.f;
hdata.forcz[i] = 0.f;
}
hdata.NFreedom = hdata.numAtom * 3;
fclose (fpc);
}
void MDSystem::
initTopology (const Topology::System & sysTop)
{
if (hdata.numAtom != sysTop.indexShift.back()){
throw MDExcptWrongNumberAtomDataTopology ();
}
unsigned shift = 0;
for (unsigned i = 0; i < sysTop.molecules.size(); ++i){
for (unsigned j = 0; j < sysTop.numbers[i]; ++j){
for (unsigned k = 0; k < sysTop.molecules[i].size(); ++k){
hdata.mass[shift] = sysTop.molecules[i].atoms[k].mass;
hdata.charge[shift] = sysTop.molecules[i].atoms[k].charge;
hdata.type[shift] = sysTop.molecules[i].atoms[k].type;
shift++;
}
}
}
initMass (&hdata);
}
static __global__ void
init_backMapTable (const IndexType numAtom,
IndexType * backMapTable)
{
IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
IndexType ii = threadIdx.x + bid * blockDim.x;
if (ii < numAtom){
backMapTable[ii] = ii;
}
}
void MDSystem::
initDeviceData ()
{
////////////////////////////////////////////////////////////
// init device system
////////////////////////////////////////////////////////////
initDeviceMDData (&hdata, &ddata);
initDeviceMDData (&hdata, &recoveredDdata);
initDeviceMDData (&hdata, &bkDdata);
hipMalloc ((void**)&backMapTable, sizeof(IndexType) * hdata.numAtom);
hipMalloc ((void**)&backMapTableBuff, sizeof(IndexType) * hdata.numAtom);
checkCUDAError ("MDSystem::initDeviceMDData, malloc back map table");
dim3 myBlockDim, atomGridDim;
myBlockDim.x = DefaultNThreadPerBlock;
IndexType nob;
if (hdata.numAtom % myBlockDim.x == 0){
nob = hdata.numAtom / myBlockDim.x;
} else {
nob = hdata.numAtom / myBlockDim.x + 1;
}
atomGridDim = toGridDim (nob);
hipLaunchKernelGGL(( init_backMapTable)
, dim3(atomGridDim), dim3(myBlockDim), 0, 0,
bkDdata.numAtom, backMapTable);
}
void MDSystem::writeHostDataGro (const char * filename,
int step,
float time,
MDTimer *timer)
{
if (timer != NULL) timer->tic(mdTimeDataIO);
FILE * fp = fopen (filename, "w");
if (fp == NULL){
throw MDExcptCannotOpenFile (filename);
}
fprintf (fp, "# at time = %f, step = %d", time, step);
#ifdef COORD_IN_ONE_VEC
ScalorType * tmpx, * tmpy, * tmpz;
tmpx = (ScalorType *)malloc (sizeof(ScalorType) * hdata.numMem);
tmpy = (ScalorType *)malloc (sizeof(ScalorType) * hdata.numMem);
tmpz = (ScalorType *)malloc (sizeof(ScalorType) * hdata.numMem);
for (IndexType i = 0; i < hdata.numAtom; ++i){
tmpx[i] = hdata.coord[i].x;
tmpy[i] = hdata.coord[i].y;
tmpz[i] = hdata.coord[i].z;
}
#endif
GromacsFileManager::writeGroFile (fp,
hdata.numAtom,
hdata.resdIndex, hdata.resdName,
hdata.atomName, hdata.atomIndex,
#ifndef COORD_IN_ONE_VEC
hdata.coordx, hdata.coordy, hdata.coordz,
#else
tmpx, tmpy, tmpz,
#endif
hdata.velox, hdata.veloy, hdata.veloz,
box.size.x, box.size.y, box.size.z) ;
#ifdef COORD_IN_ONE_VEC
free (tmpx);
free (tmpy);
free (tmpz);
#endif
fclose (fp);
if (timer != NULL) timer->toc(mdTimeDataIO);
}
MDSystem::~MDSystem()
{
freeAPointer ((void **)&xdx);
freeAPointer ((void **)&xdtrrx);
freeAPointer ((void **)&xdtrrv);
freeAPointer ((void **)&xdtrrf);
}
void MDSystem::updateHostFromDevice (MDTimer *timer)
{
if (timer != NULL) timer->tic(mdTimeDataTransfer);
cpyDeviceMDDataToHost (&ddata, &hdata);
if (timer != NULL) timer->toc(mdTimeDataTransfer);
}
void MDSystem::updateHostFromRecovered (MDTimer *timer)
{
if (timer != NULL) timer->tic(mdTimeDataTransfer);
cpyDeviceMDDataToHost (&recoveredDdata, &hdata);
if (timer != NULL) timer->toc(mdTimeDataTransfer);
}
void MDSystem::initWriteXtc (const char * filename, float prec)
{
xdfile = NULL;
xdfile = xdrfile_open (filename, "w");
if (xdfile == NULL){
MDExcptCannotOpenFile ("MDSystem::initWriteXtc", filename);
}
for (unsigned i = 0; i < 3; ++i){
for (unsigned j = 0; j < 3; ++j){
xdbox[i][j] = 0.f;
}
}
xdbox[0][0] = box.size.x;
xdbox[1][1] = box.size.y;
xdbox[2][2] = box.size.z;
xdx = (rvec *) malloc (sizeof(rvec) * hdata.numMem);
if (xdx == NULL){
MDExcptFailedMallocOnHost ("MDSystem::initWriteXtc", "xdx", sizeof(rvec) * hdata.numMem);
}
xdprec = prec;
}
void MDSystem::writeHostDataXtc (int step, float time, MDTimer *timer)
{
if (timer != NULL) timer->tic(mdTimeDataIO);
for (IndexType i = 0; i < hdata.numAtom; ++i){
#ifndef COORD_IN_ONE_VEC
xdx[i][0] = hdata.coordx[i];
xdx[i][1] = hdata.coordy[i];
xdx[i][2] = hdata.coordz[i];
#else
xdx[i][0] = hdata.coord[i].x;
xdx[i][1] = hdata.coord[i].y;
xdx[i][2] = hdata.coord[i].z;
#endif
}
xdbox[0][0] = box.size.x;
xdbox[1][1] = box.size.y;
xdbox[2][2] = box.size.z;
write_xtc (xdfile, hdata.numAtom, step, time, xdbox, xdx, xdprec);
if (timer != NULL) timer->toc(mdTimeDataIO);
}
void MDSystem::endWriteXtc()
{
freeAPointer ((void**)&xdx);
xdrfile_close(xdfile);
}
void MDSystem::
initWriteTrr (const char * filename)
{
xdtrrfile = NULL;
xdtrrfile = xdrfile_open (filename, "w");
if (xdtrrfile == NULL){
MDExcptCannotOpenFile ("MDSystem::initWriteTrr", filename);
}
for (unsigned i = 0; i < 3; ++i){
for (unsigned j = 0; j < 3; ++j){
xdbox[i][j] = 0.f;
}
}
xdbox[0][0] = box.size.x;
xdbox[1][1] = box.size.y;
xdbox[2][2] = box.size.z;
xdtrrx = (rvec *) malloc (sizeof(rvec) * hdata.numMem);
xdtrrv = (rvec *) malloc (sizeof(rvec) * hdata.numMem);
xdtrrf = (rvec *) malloc (sizeof(rvec) * hdata.numMem);
if (xdtrrx == NULL){
MDExcptFailedMallocOnHost ("MDSystem::initWriteXtc", "xdtrrx", sizeof(rvec) * hdata.numMem);
}
if (xdtrrv == NULL){
MDExcptFailedMallocOnHost ("MDSystem::initWriteXtc", "xdtrrv", sizeof(rvec) * hdata.numMem);
}
if (xdtrrf == NULL){
MDExcptFailedMallocOnHost ("MDSystem::initWriteXtc", "xdtrrf", sizeof(rvec) * hdata.numMem);
}
}
void MDSystem::
writeHostDataTrr (int step, float time, MDTimer *timer)
{
if (timer != NULL) timer->tic(mdTimeDataIO);
for (IndexType i = 0; i < hdata.numAtom; ++i){
xdtrrx[i][0] = hdata.coord[i].x;
xdtrrx[i][1] = hdata.coord[i].y;
xdtrrx[i][2] = hdata.coord[i].z;
xdtrrv[i][0] = hdata.velox[i];
xdtrrv[i][1] = hdata.veloy[i];
xdtrrv[i][2] = hdata.veloz[i];
xdtrrf[i][0] = hdata.forcx[i];
xdtrrf[i][1] = hdata.forcy[i];
xdtrrf[i][2] = hdata.forcz[i];
}
xdbox[0][0] = box.size.x;
xdbox[1][1] = box.size.y;
xdbox[2][2] = box.size.z;
int status = write_trr (xdtrrfile, hdata.numAtom, step, time, 0.f,
xdbox, xdtrrx, xdtrrv, xdtrrf);
if (status != exdrOK) {
printf ("error writing trr file!\n");
exit (1);
}
if (timer != NULL) timer->toc(mdTimeDataIO);
}
void MDSystem::endWriteTrr()
{
xdrfile_close(xdtrrfile);
freeAPointer ((void **)&xdtrrx);
freeAPointer ((void **)&xdtrrv);
freeAPointer ((void **)&xdtrrf);
}
void MDSystem::
writePosiForce (const char * filename)
{
FILE * fp = fopen (filename, "w");
if (fp == NULL){
fprintf (stderr, "cannot open file %s\n", filename);
exit (1);
}
for (unsigned i = 0; i < hdata.numAtom; ++i){
fprintf (fp, "%.8e %.8e %.8e %.8e %.8e %.8e\n",
hdata.coord[i].x,
hdata.coord[i].y,
hdata.coord[i].z,
hdata.forcx[i],
hdata.forcy[i],
hdata.forcz[i]);
}
fclose (fp);
}
void MDSystem::
recoverDeviceData (MDTimer * timer)
{
if (timer != NULL) timer->tic(mdTimeDataTransfer);
dim3 myBlockDim, atomGridDim;
myBlockDim.x = DefaultNThreadPerBlock;
IndexType nob;
if (hdata.numAtom % myBlockDim.x == 0){
nob = hdata.numAtom / myBlockDim.x;
} else {
nob = hdata.numAtom / myBlockDim.x + 1;
}
atomGridDim = toGridDim (nob);
hipLaunchKernelGGL(( Reshuffle_reshuffleArray)
, dim3(atomGridDim), dim3(myBlockDim), 0, 0,
ddata.coord, ddata.numAtom, backMapTable, recoveredDdata.coord);
hipLaunchKernelGGL(( Reshuffle_reshuffleArray)
, dim3(atomGridDim), dim3(myBlockDim), 0, 0,
ddata.coordNoix, ddata.numAtom, backMapTable, recoveredDdata.coordNoix);
hipLaunchKernelGGL(( Reshuffle_reshuffleArray)
, dim3(atomGridDim), dim3(myBlockDim), 0, 0,
ddata.coordNoiy, ddata.numAtom, backMapTable, recoveredDdata.coordNoiy);
hipLaunchKernelGGL(( Reshuffle_reshuffleArray)
, dim3(atomGridDim), dim3(myBlockDim), 0, 0,
ddata.coordNoiz, ddata.numAtom, backMapTable, recoveredDdata.coordNoiz);
hipLaunchKernelGGL(( Reshuffle_reshuffleArray)
, dim3(atomGridDim), dim3(myBlockDim), 0, 0,
ddata.rcut, ddata.numAtom, backMapTable, recoveredDdata.rcut);
hipLaunchKernelGGL(( Reshuffle_reshuffleArray)
, dim3(atomGridDim), dim3(myBlockDim), 0, 0,
ddata.velox, ddata.numAtom, backMapTable, recoveredDdata.velox);
hipLaunchKernelGGL(( Reshuffle_reshuffleArray)
, dim3(atomGridDim), dim3(myBlockDim), 0, 0,
ddata.veloy, ddata.numAtom, backMapTable, recoveredDdata.veloy);
hipLaunchKernelGGL(( Reshuffle_reshuffleArray)
, dim3(atomGridDim), dim3(myBlockDim), 0, 0,
ddata.veloz, ddata.numAtom, backMapTable, recoveredDdata.veloz);
hipLaunchKernelGGL(( Reshuffle_reshuffleArray)
, dim3(atomGridDim), dim3(myBlockDim), 0, 0,
ddata.forcx, ddata.numAtom, backMapTable, recoveredDdata.forcx);
hipLaunchKernelGGL(( Reshuffle_reshuffleArray)
, dim3(atomGridDim), dim3(myBlockDim), 0, 0,
ddata.forcy, ddata.numAtom, backMapTable, recoveredDdata.forcy);
hipLaunchKernelGGL(( Reshuffle_reshuffleArray)
, dim3(atomGridDim), dim3(myBlockDim), 0, 0,
ddata.forcz, ddata.numAtom, backMapTable, recoveredDdata.forcz);
hipLaunchKernelGGL(( Reshuffle_reshuffleArray)
, dim3(atomGridDim), dim3(myBlockDim), 0, 0,
ddata.type, ddata.numAtom, backMapTable, recoveredDdata.type);
hipLaunchKernelGGL(( Reshuffle_reshuffleArray)
, dim3(atomGridDim), dim3(myBlockDim), 0, 0,
ddata.mass, ddata.numAtom, backMapTable, recoveredDdata.mass);
hipLaunchKernelGGL(( Reshuffle_reshuffleArray)
, dim3(atomGridDim), dim3(myBlockDim), 0, 0,
ddata.massi, ddata.numAtom, backMapTable, recoveredDdata.massi);
hipLaunchKernelGGL(( Reshuffle_reshuffleArray)
, dim3(atomGridDim), dim3(myBlockDim), 0, 0,
ddata.charge, ddata.numAtom, backMapTable, recoveredDdata.charge);
if (timer != NULL) timer->toc(mdTimeDataTransfer);
}
static __global__ void
Reshuffle_calBackMapTable (const IndexType numAtom,
const IndexType * backMapTableBuff,
const IndexType * idxTable,
IndexType *backMapTable)
{
IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
IndexType ii = threadIdx.x + bid * blockDim.x;
if (ii < numAtom){
backMapTable[idxTable[ii]] = backMapTableBuff[ii];
}
}
void MDSystem::
reshuffle (const IndexType * indexTable,
const IndexType & numAtom,
MDTimer * timer)
{
if (timer != NULL) timer->tic(mdTimeReshuffleSystem);
cpyDeviceMDDataToDevice (&ddata, &bkDdata);
IndexType nob;
dim3 myBlockDim;
myBlockDim.x = DefaultNThreadPerBlock;
if (ddata.numAtom % myBlockDim.x == 0){
nob = ddata.numAtom / myBlockDim.x;
} else {
nob = ddata.numAtom / myBlockDim.x + 1;
}
dim3 atomGridDim = toGridDim (nob);
hipLaunchKernelGGL(( Reshuffle_reshuffleArray)
, dim3(atomGridDim), dim3(myBlockDim), 0, 0,
bkDdata.coord, ddata.numAtom, indexTable, ddata.coord);
hipLaunchKernelGGL(( Reshuffle_reshuffleArray)
, dim3(atomGridDim), dim3(myBlockDim), 0, 0,
bkDdata.coordNoix, ddata.numAtom, indexTable, ddata.coordNoix);
hipLaunchKernelGGL(( Reshuffle_reshuffleArray)
, dim3(atomGridDim), dim3(myBlockDim), 0, 0,
bkDdata.coordNoiy, ddata.numAtom, indexTable, ddata.coordNoiy);
hipLaunchKernelGGL(( Reshuffle_reshuffleArray)
, dim3(atomGridDim), dim3(myBlockDim), 0, 0,
bkDdata.coordNoiz, ddata.numAtom, indexTable, ddata.coordNoiz);
hipLaunchKernelGGL(( Reshuffle_reshuffleArray)
, dim3(atomGridDim), dim3(myBlockDim), 0, 0,
bkDdata.rcut, ddata.numAtom, indexTable, ddata.rcut);
hipLaunchKernelGGL(( Reshuffle_reshuffleArray)
, dim3(atomGridDim), dim3(myBlockDim), 0, 0,
bkDdata.velox, ddata.numAtom, indexTable, ddata.velox);
hipLaunchKernelGGL(( Reshuffle_reshuffleArray)
, dim3(atomGridDim), dim3(myBlockDim), 0, 0,
bkDdata.veloy, ddata.numAtom, indexTable, ddata.veloy);
hipLaunchKernelGGL(( Reshuffle_reshuffleArray)
, dim3(atomGridDim), dim3(myBlockDim), 0, 0,
bkDdata.veloz, ddata.numAtom, indexTable, ddata.veloz);
hipLaunchKernelGGL(( Reshuffle_reshuffleArray)
, dim3(atomGridDim), dim3(myBlockDim), 0, 0,
bkDdata.forcx, ddata.numAtom, indexTable, ddata.forcx);
hipLaunchKernelGGL(( Reshuffle_reshuffleArray)
, dim3(atomGridDim), dim3(myBlockDim), 0, 0,
bkDdata.forcy, ddata.numAtom, indexTable, ddata.forcy);
hipLaunchKernelGGL(( Reshuffle_reshuffleArray)
, dim3(atomGridDim), dim3(myBlockDim), 0, 0,
bkDdata.forcz, ddata.numAtom, indexTable, ddata.forcz);
hipLaunchKernelGGL(( Reshuffle_reshuffleArray)
, dim3(atomGridDim), dim3(myBlockDim), 0, 0,
bkDdata.type, ddata.numAtom, indexTable, ddata.type);
hipLaunchKernelGGL(( Reshuffle_reshuffleArray)
, dim3(atomGridDim), dim3(myBlockDim), 0, 0,
bkDdata.mass, ddata.numAtom, indexTable, ddata.mass);
hipLaunchKernelGGL(( Reshuffle_reshuffleArray)
, dim3(atomGridDim), dim3(myBlockDim), 0, 0,
bkDdata.massi, ddata.numAtom, indexTable, ddata.massi);
hipLaunchKernelGGL(( Reshuffle_reshuffleArray)
, dim3(atomGridDim), dim3(myBlockDim), 0, 0,
bkDdata.charge, ddata.numAtom, indexTable, ddata.charge);
hipMemcpy (backMapTableBuff, backMapTable, sizeof(IndexType) * ddata.numAtom,
hipMemcpyDeviceToDevice);
hipLaunchKernelGGL(( Reshuffle_calBackMapTable)
, dim3(atomGridDim), dim3(myBlockDim), 0, 0,
ddata.numAtom,
backMapTableBuff,
indexTable,
backMapTable);
if (timer != NULL) timer->toc(mdTimeReshuffleSystem);
}
| 9fbebd0f02302a5952dbdd297d31ff5239588e43.cu | #define DEVICE_CODE
#define CPLUSPLUS
#include "MDSystem_interface.h"
#include "GromacsFileManager.h"
#include "FileManager.h"
#include "MDException.h"
#include "Reshuffle_interface.h"
MDSystem::MDSystem()
{
xdfile = NULL;
xdx = NULL;
xdtrrx = NULL;
xdtrrv = NULL;
xdtrrf = NULL;
tmpNAtomType = 0;
// setNULL (&hdata);
// setNULL (&ddata);
}
void MDSystem::
normalizeDeviceData (MDTimer * timer)
{
if (timer != NULL) timer->tic(mdTimeNormalizeSys);
IndexType nob = (ddata.numAtom + DefaultNThreadPerBlock - 1) / DefaultNThreadPerBlock;
dim3 atomGridDim = toGridDim (nob);
normalizeSystem
<<<atomGridDim, DefaultNThreadPerBlock>>> (
box,
ddata.numAtom,
ddata.coord,
ddata.coordNoix,
ddata.coordNoiy,
ddata.coordNoiz);
checkCUDAError ("NeighborList::rebuild, normalize System");
if (timer != NULL) timer->toc(mdTimeNormalizeSys);
}
void MDSystem::initConfig (const char * configfile,
// const char * mapfile,
const IndexType & maxNumAtom)
{
FILE * fpc = fopen (configfile, "r");
if (fpc == NULL) {
throw MDExcptCannotOpenFile ("MDSystem::initConfig:", configfile);
}
while (fgetc(fpc) != '\n');
IndexType numAtom, numMem;
if (fscanf (fpc, "%d", &(numAtom)) != 1){
throw MDExcptWrongFileFormat ("MDSystem::initConfig", configfile);
}
if (maxNumAtom != 0) {
numMem = maxNumAtom;
}
else {
numMem = numAtom;
}
mallocHostMDData (numAtom, numMem, &hdata);
IndexType * tmpatomIndex = (IndexType * )malloc(sizeof(IndexType) * numMem);
if (tmpatomIndex == NULL){
throw MDExcptFailedMallocOnHost ("MDSystem::initConfig", "tmpatomIndex",
sizeof(IndexType) * numMem);
}
ScalorType bx, by, bz;
#ifdef COORD_IN_ONE_VEC
ScalorType * tmpx, * tmpy, * tmpz;
tmpx = (ScalorType *)malloc (sizeof(ScalorType) * numMem);
tmpy = (ScalorType *)malloc (sizeof(ScalorType) * numMem);
tmpz = (ScalorType *)malloc (sizeof(ScalorType) * numMem);
#endif
GromacsFileManager::readGroFile (configfile,
hdata.resdIndex, hdata.resdName,
hdata.atomName, hdata.atomIndex,
#ifndef COORD_IN_ONE_VEC
hdata.coordx, hdata.coordy, hdata.coordz,
#else
tmpx, tmpy, tmpz,
#endif
hdata.velox, hdata.veloy, hdata.veloz,
&bx, &by, &bz) ;
#ifdef COORD_IN_ONE_VEC
for (IndexType i = 0; i < numAtom; ++i){
hdata.coord[i].x = tmpx[i];
hdata.coord[i].y = tmpy[i];
hdata.coord[i].z = tmpz[i];
}
free (tmpx);
free (tmpy);
free (tmpz);
#endif
freeAPointer ((void**)&tmpatomIndex);
RectangularBoxGeometry::setBoxSize (bx, by, bz, &box);
// tmpNAtomType = readAtomNameMapFile (mapfile, hdata.numAtom, hdata.atomName,
// hdata.type, hdata.mass, hdata.charge) ;
// initMass (&hdata);
printf ("# total %d atoms found, %d types are presented in mapping file\n",
hdata.numAtom, tmpNAtomType);
for (IndexType i = 0; i < hdata.numAtom; ++i){
hdata.forcx[i] = 0.f;
hdata.forcy[i] = 0.f;
hdata.forcz[i] = 0.f;
}
hdata.NFreedom = hdata.numAtom * 3;
fclose (fpc);
}
void MDSystem::
initTopology (const Topology::System & sysTop)
{
if (hdata.numAtom != sysTop.indexShift.back()){
throw MDExcptWrongNumberAtomDataTopology ();
}
unsigned shift = 0;
for (unsigned i = 0; i < sysTop.molecules.size(); ++i){
for (unsigned j = 0; j < sysTop.numbers[i]; ++j){
for (unsigned k = 0; k < sysTop.molecules[i].size(); ++k){
hdata.mass[shift] = sysTop.molecules[i].atoms[k].mass;
hdata.charge[shift] = sysTop.molecules[i].atoms[k].charge;
hdata.type[shift] = sysTop.molecules[i].atoms[k].type;
shift++;
}
}
}
initMass (&hdata);
}
static __global__ void
init_backMapTable (const IndexType numAtom,
IndexType * backMapTable)
{
IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
IndexType ii = threadIdx.x + bid * blockDim.x;
if (ii < numAtom){
backMapTable[ii] = ii;
}
}
void MDSystem::
initDeviceData ()
{
////////////////////////////////////////////////////////////
// init device system
////////////////////////////////////////////////////////////
initDeviceMDData (&hdata, &ddata);
initDeviceMDData (&hdata, &recoveredDdata);
initDeviceMDData (&hdata, &bkDdata);
cudaMalloc ((void**)&backMapTable, sizeof(IndexType) * hdata.numAtom);
cudaMalloc ((void**)&backMapTableBuff, sizeof(IndexType) * hdata.numAtom);
checkCUDAError ("MDSystem::initDeviceMDData, malloc back map table");
dim3 myBlockDim, atomGridDim;
myBlockDim.x = DefaultNThreadPerBlock;
IndexType nob;
if (hdata.numAtom % myBlockDim.x == 0){
nob = hdata.numAtom / myBlockDim.x;
} else {
nob = hdata.numAtom / myBlockDim.x + 1;
}
atomGridDim = toGridDim (nob);
init_backMapTable
<<<atomGridDim, myBlockDim>>> (
bkDdata.numAtom, backMapTable);
}
void MDSystem::writeHostDataGro (const char * filename,
int step,
float time,
MDTimer *timer)
{
if (timer != NULL) timer->tic(mdTimeDataIO);
FILE * fp = fopen (filename, "w");
if (fp == NULL){
throw MDExcptCannotOpenFile (filename);
}
fprintf (fp, "# at time = %f, step = %d", time, step);
#ifdef COORD_IN_ONE_VEC
ScalorType * tmpx, * tmpy, * tmpz;
tmpx = (ScalorType *)malloc (sizeof(ScalorType) * hdata.numMem);
tmpy = (ScalorType *)malloc (sizeof(ScalorType) * hdata.numMem);
tmpz = (ScalorType *)malloc (sizeof(ScalorType) * hdata.numMem);
for (IndexType i = 0; i < hdata.numAtom; ++i){
tmpx[i] = hdata.coord[i].x;
tmpy[i] = hdata.coord[i].y;
tmpz[i] = hdata.coord[i].z;
}
#endif
GromacsFileManager::writeGroFile (fp,
hdata.numAtom,
hdata.resdIndex, hdata.resdName,
hdata.atomName, hdata.atomIndex,
#ifndef COORD_IN_ONE_VEC
hdata.coordx, hdata.coordy, hdata.coordz,
#else
tmpx, tmpy, tmpz,
#endif
hdata.velox, hdata.veloy, hdata.veloz,
box.size.x, box.size.y, box.size.z) ;
#ifdef COORD_IN_ONE_VEC
free (tmpx);
free (tmpy);
free (tmpz);
#endif
fclose (fp);
if (timer != NULL) timer->toc(mdTimeDataIO);
}
MDSystem::~MDSystem()
{
freeAPointer ((void **)&xdx);
freeAPointer ((void **)&xdtrrx);
freeAPointer ((void **)&xdtrrv);
freeAPointer ((void **)&xdtrrf);
}
void MDSystem::updateHostFromDevice (MDTimer *timer)
{
if (timer != NULL) timer->tic(mdTimeDataTransfer);
cpyDeviceMDDataToHost (&ddata, &hdata);
if (timer != NULL) timer->toc(mdTimeDataTransfer);
}
void MDSystem::updateHostFromRecovered (MDTimer *timer)
{
if (timer != NULL) timer->tic(mdTimeDataTransfer);
cpyDeviceMDDataToHost (&recoveredDdata, &hdata);
if (timer != NULL) timer->toc(mdTimeDataTransfer);
}
void MDSystem::initWriteXtc (const char * filename, float prec)
{
xdfile = NULL;
xdfile = xdrfile_open (filename, "w");
if (xdfile == NULL){
MDExcptCannotOpenFile ("MDSystem::initWriteXtc", filename);
}
for (unsigned i = 0; i < 3; ++i){
for (unsigned j = 0; j < 3; ++j){
xdbox[i][j] = 0.f;
}
}
xdbox[0][0] = box.size.x;
xdbox[1][1] = box.size.y;
xdbox[2][2] = box.size.z;
xdx = (rvec *) malloc (sizeof(rvec) * hdata.numMem);
if (xdx == NULL){
MDExcptFailedMallocOnHost ("MDSystem::initWriteXtc", "xdx", sizeof(rvec) * hdata.numMem);
}
xdprec = prec;
}
void MDSystem::writeHostDataXtc (int step, float time, MDTimer *timer)
{
if (timer != NULL) timer->tic(mdTimeDataIO);
for (IndexType i = 0; i < hdata.numAtom; ++i){
#ifndef COORD_IN_ONE_VEC
xdx[i][0] = hdata.coordx[i];
xdx[i][1] = hdata.coordy[i];
xdx[i][2] = hdata.coordz[i];
#else
xdx[i][0] = hdata.coord[i].x;
xdx[i][1] = hdata.coord[i].y;
xdx[i][2] = hdata.coord[i].z;
#endif
}
xdbox[0][0] = box.size.x;
xdbox[1][1] = box.size.y;
xdbox[2][2] = box.size.z;
write_xtc (xdfile, hdata.numAtom, step, time, xdbox, xdx, xdprec);
if (timer != NULL) timer->toc(mdTimeDataIO);
}
void MDSystem::endWriteXtc()
{
freeAPointer ((void**)&xdx);
xdrfile_close(xdfile);
}
void MDSystem::
initWriteTrr (const char * filename)
{
xdtrrfile = NULL;
xdtrrfile = xdrfile_open (filename, "w");
if (xdtrrfile == NULL){
MDExcptCannotOpenFile ("MDSystem::initWriteTrr", filename);
}
for (unsigned i = 0; i < 3; ++i){
for (unsigned j = 0; j < 3; ++j){
xdbox[i][j] = 0.f;
}
}
xdbox[0][0] = box.size.x;
xdbox[1][1] = box.size.y;
xdbox[2][2] = box.size.z;
xdtrrx = (rvec *) malloc (sizeof(rvec) * hdata.numMem);
xdtrrv = (rvec *) malloc (sizeof(rvec) * hdata.numMem);
xdtrrf = (rvec *) malloc (sizeof(rvec) * hdata.numMem);
if (xdtrrx == NULL){
MDExcptFailedMallocOnHost ("MDSystem::initWriteXtc", "xdtrrx", sizeof(rvec) * hdata.numMem);
}
if (xdtrrv == NULL){
MDExcptFailedMallocOnHost ("MDSystem::initWriteXtc", "xdtrrv", sizeof(rvec) * hdata.numMem);
}
if (xdtrrf == NULL){
MDExcptFailedMallocOnHost ("MDSystem::initWriteXtc", "xdtrrf", sizeof(rvec) * hdata.numMem);
}
}
void MDSystem::
writeHostDataTrr (int step, float time, MDTimer *timer)
{
if (timer != NULL) timer->tic(mdTimeDataIO);
for (IndexType i = 0; i < hdata.numAtom; ++i){
xdtrrx[i][0] = hdata.coord[i].x;
xdtrrx[i][1] = hdata.coord[i].y;
xdtrrx[i][2] = hdata.coord[i].z;
xdtrrv[i][0] = hdata.velox[i];
xdtrrv[i][1] = hdata.veloy[i];
xdtrrv[i][2] = hdata.veloz[i];
xdtrrf[i][0] = hdata.forcx[i];
xdtrrf[i][1] = hdata.forcy[i];
xdtrrf[i][2] = hdata.forcz[i];
}
xdbox[0][0] = box.size.x;
xdbox[1][1] = box.size.y;
xdbox[2][2] = box.size.z;
int status = write_trr (xdtrrfile, hdata.numAtom, step, time, 0.f,
xdbox, xdtrrx, xdtrrv, xdtrrf);
if (status != exdrOK) {
printf ("error writing trr file!\n");
exit (1);
}
if (timer != NULL) timer->toc(mdTimeDataIO);
}
void MDSystem::endWriteTrr()
{
xdrfile_close(xdtrrfile);
freeAPointer ((void **)&xdtrrx);
freeAPointer ((void **)&xdtrrv);
freeAPointer ((void **)&xdtrrf);
}
void MDSystem::
writePosiForce (const char * filename)
{
FILE * fp = fopen (filename, "w");
if (fp == NULL){
fprintf (stderr, "cannot open file %s\n", filename);
exit (1);
}
for (unsigned i = 0; i < hdata.numAtom; ++i){
fprintf (fp, "%.8e %.8e %.8e %.8e %.8e %.8e\n",
hdata.coord[i].x,
hdata.coord[i].y,
hdata.coord[i].z,
hdata.forcx[i],
hdata.forcy[i],
hdata.forcz[i]);
}
fclose (fp);
}
void MDSystem::
recoverDeviceData (MDTimer * timer)
{
if (timer != NULL) timer->tic(mdTimeDataTransfer);
dim3 myBlockDim, atomGridDim;
myBlockDim.x = DefaultNThreadPerBlock;
IndexType nob;
if (hdata.numAtom % myBlockDim.x == 0){
nob = hdata.numAtom / myBlockDim.x;
} else {
nob = hdata.numAtom / myBlockDim.x + 1;
}
atomGridDim = toGridDim (nob);
Reshuffle_reshuffleArray
<<<atomGridDim, myBlockDim>>>
(ddata.coord, ddata.numAtom, backMapTable, recoveredDdata.coord);
Reshuffle_reshuffleArray
<<<atomGridDim, myBlockDim>>>
(ddata.coordNoix, ddata.numAtom, backMapTable, recoveredDdata.coordNoix);
Reshuffle_reshuffleArray
<<<atomGridDim, myBlockDim>>>
(ddata.coordNoiy, ddata.numAtom, backMapTable, recoveredDdata.coordNoiy);
Reshuffle_reshuffleArray
<<<atomGridDim, myBlockDim>>>
(ddata.coordNoiz, ddata.numAtom, backMapTable, recoveredDdata.coordNoiz);
Reshuffle_reshuffleArray
<<<atomGridDim, myBlockDim>>>
(ddata.rcut, ddata.numAtom, backMapTable, recoveredDdata.rcut);
Reshuffle_reshuffleArray
<<<atomGridDim, myBlockDim>>>
(ddata.velox, ddata.numAtom, backMapTable, recoveredDdata.velox);
Reshuffle_reshuffleArray
<<<atomGridDim, myBlockDim>>>
(ddata.veloy, ddata.numAtom, backMapTable, recoveredDdata.veloy);
Reshuffle_reshuffleArray
<<<atomGridDim, myBlockDim>>>
(ddata.veloz, ddata.numAtom, backMapTable, recoveredDdata.veloz);
Reshuffle_reshuffleArray
<<<atomGridDim, myBlockDim>>>
(ddata.forcx, ddata.numAtom, backMapTable, recoveredDdata.forcx);
Reshuffle_reshuffleArray
<<<atomGridDim, myBlockDim>>>
(ddata.forcy, ddata.numAtom, backMapTable, recoveredDdata.forcy);
Reshuffle_reshuffleArray
<<<atomGridDim, myBlockDim>>>
(ddata.forcz, ddata.numAtom, backMapTable, recoveredDdata.forcz);
Reshuffle_reshuffleArray
<<<atomGridDim, myBlockDim>>>
(ddata.type, ddata.numAtom, backMapTable, recoveredDdata.type);
Reshuffle_reshuffleArray
<<<atomGridDim, myBlockDim>>>
(ddata.mass, ddata.numAtom, backMapTable, recoveredDdata.mass);
Reshuffle_reshuffleArray
<<<atomGridDim, myBlockDim>>>
(ddata.massi, ddata.numAtom, backMapTable, recoveredDdata.massi);
Reshuffle_reshuffleArray
<<<atomGridDim, myBlockDim>>>
(ddata.charge, ddata.numAtom, backMapTable, recoveredDdata.charge);
if (timer != NULL) timer->toc(mdTimeDataTransfer);
}
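// Back-map bookkeeping for reshuffling: the kernel below writes the previous
// back-map entry of slot ii to that slot's new position idxTable[ii]. As the
// usage in recoverDeviceData() above suggests, backMapTable[i] holds the original
// (pre-reshuffle) index of the atom currently stored in slot i, which lets the
// reshuffled arrays be scattered back into their initial order.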
static __global__ void
Reshuffle_calBackMapTable (const IndexType numAtom,
const IndexType * backMapTableBuff,
const IndexType * idxTable,
IndexType *backMapTable)
{
IndexType bid = blockIdx.x + gridDim.x * blockIdx.y;
IndexType ii = threadIdx.x + bid * blockDim.x;
if (ii < numAtom){
backMapTable[idxTable[ii]] = backMapTableBuff[ii];
}
}
void MDSystem::
reshuffle (const IndexType * indexTable,
const IndexType & numAtom,
MDTimer * timer)
{
if (timer != NULL) timer->tic(mdTimeReshuffleSystem);
cpyDeviceMDDataToDevice (&ddata, &bkDdata);
IndexType nob;
dim3 myBlockDim;
myBlockDim.x = DefaultNThreadPerBlock;
if (ddata.numAtom % myBlockDim.x == 0){
nob = ddata.numAtom / myBlockDim.x;
} else {
nob = ddata.numAtom / myBlockDim.x + 1;
}
dim3 atomGridDim = toGridDim (nob);
Reshuffle_reshuffleArray
<<<atomGridDim, myBlockDim>>>
(bkDdata.coord, ddata.numAtom, indexTable, ddata.coord);
Reshuffle_reshuffleArray
<<<atomGridDim, myBlockDim>>>
(bkDdata.coordNoix, ddata.numAtom, indexTable, ddata.coordNoix);
Reshuffle_reshuffleArray
<<<atomGridDim, myBlockDim>>>
(bkDdata.coordNoiy, ddata.numAtom, indexTable, ddata.coordNoiy);
Reshuffle_reshuffleArray
<<<atomGridDim, myBlockDim>>>
(bkDdata.coordNoiz, ddata.numAtom, indexTable, ddata.coordNoiz);
Reshuffle_reshuffleArray
<<<atomGridDim, myBlockDim>>>
(bkDdata.rcut, ddata.numAtom, indexTable, ddata.rcut);
Reshuffle_reshuffleArray
<<<atomGridDim, myBlockDim>>>
(bkDdata.velox, ddata.numAtom, indexTable, ddata.velox);
Reshuffle_reshuffleArray
<<<atomGridDim, myBlockDim>>>
(bkDdata.veloy, ddata.numAtom, indexTable, ddata.veloy);
Reshuffle_reshuffleArray
<<<atomGridDim, myBlockDim>>>
(bkDdata.veloz, ddata.numAtom, indexTable, ddata.veloz);
Reshuffle_reshuffleArray
<<<atomGridDim, myBlockDim>>>
(bkDdata.forcx, ddata.numAtom, indexTable, ddata.forcx);
Reshuffle_reshuffleArray
<<<atomGridDim, myBlockDim>>>
(bkDdata.forcy, ddata.numAtom, indexTable, ddata.forcy);
Reshuffle_reshuffleArray
<<<atomGridDim, myBlockDim>>>
(bkDdata.forcz, ddata.numAtom, indexTable, ddata.forcz);
Reshuffle_reshuffleArray
<<<atomGridDim, myBlockDim>>>
(bkDdata.type, ddata.numAtom, indexTable, ddata.type);
Reshuffle_reshuffleArray
<<<atomGridDim, myBlockDim>>>
(bkDdata.mass, ddata.numAtom, indexTable, ddata.mass);
Reshuffle_reshuffleArray
<<<atomGridDim, myBlockDim>>>
(bkDdata.massi, ddata.numAtom, indexTable, ddata.massi);
Reshuffle_reshuffleArray
<<<atomGridDim, myBlockDim>>>
(bkDdata.charge, ddata.numAtom, indexTable, ddata.charge);
cudaMemcpy (backMapTableBuff, backMapTable, sizeof(IndexType) * ddata.numAtom,
cudaMemcpyDeviceToDevice);
Reshuffle_calBackMapTable
<<<atomGridDim, myBlockDim>>> (
ddata.numAtom,
backMapTableBuff,
indexTable,
backMapTable);
if (timer != NULL) timer->toc(mdTimeReshuffleSystem);
}
|
87d8ebd9179bb4cf706e6db04f594e206dc5a167.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "user_defined_types.h"
#include "op_datatypes.h"
#include "kernels.h"
__device__
#include <save_soln.h>
__global__
void op_cuda_save_soln(float *arg0,float *arg1,int set_size)
{
for (int n = threadIdx.x + blockIdx.x * blockDim.x; n < set_size; n += blockDim.x * gridDim.x) {
save_soln(arg0 + n * 4,arg1 + n * 4);
}
}
float op_par_loop_save_soln(const char *name,op_set set,struct op_dat<void> *arg0,int idx0,op_ptr *ptr0,enum op_access acc0,struct op_dat<void> *arg1,int idx1,op_ptr *ptr1,enum op_access acc1)
{
int bsize = BSIZE;
int gridsize = (set.size - 1) / bsize + 1;
int reduct_bytes = 0;
int reduct_size = 0;
int reduct_shared = reduct_size * (BSIZE / 2);
int const_bytes = 0;
hipEvent_t start, stop;
float elapsed_time_ms = 0.00000F;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start,0);
hipLaunchKernelGGL(( op_cuda_save_soln), dim3(gridsize),dim3(bsize),reduct_shared, 0, ((float *)arg0->dat_d),((float *)arg1->dat_d),set.size);
hipEventRecord(stop,0);
hipDeviceSynchronize();
hipEventElapsedTime(&elapsed_time_ms,start,stop);
hipEventDestroy(start);
hipEventDestroy(stop);
return elapsed_time_ms;
}
| 87d8ebd9179bb4cf706e6db04f594e206dc5a167.cu | #include "user_defined_types.h"
#include "op_datatypes.h"
#include "kernels.h"
__device__
#include <save_soln.h>
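// op_cuda_save_soln below walks the set with a grid-stride loop, so any grid size
// covers every element; each element owns 4 consecutive floats in both datasets,
// hence the n * 4 offsets passed to the user kernel save_soln().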
__global__
void op_cuda_save_soln(float *arg0,float *arg1,int set_size)
{
for (int n = threadIdx.x + blockIdx.x * blockDim.x; n < set_size; n += blockDim.x * gridDim.x) {
save_soln(arg0 + n * 4,arg1 + n * 4);
}
}
float op_par_loop_save_soln(const char *name,op_set set,struct op_dat<void> *arg0,int idx0,op_ptr *ptr0,enum op_access acc0,struct op_dat<void> *arg1,int idx1,op_ptr *ptr1,enum op_access acc1)
{
int bsize = BSIZE;
int gridsize = (set.size - 1) / bsize + 1;
int reduct_bytes = 0;
int reduct_size = 0;
int reduct_shared = reduct_size * (BSIZE / 2);
int const_bytes = 0;
cudaEvent_t start, stop;
float elapsed_time_ms = 0.00000F;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
op_cuda_save_soln<<<gridsize,bsize,reduct_shared>>>(((float *)arg0->dat_d),((float *)arg1->dat_d),set.size);
cudaEventRecord(stop,0);
cudaThreadSynchronize();
cudaEventElapsedTime(&elapsed_time_ms,start,stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
return elapsed_time_ms;
}
|
9e53064fca793b17618a65e193bfd044749691ae.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "assignmentHPC1.cuh"
#include <iostream>
#include <cstdlib>
#include <chrono>
using namespace std;
using namespace std::chrono;
__global__ void reduce(float *input, float *output, unsigned int n) {
unsigned int block_size = blockDim.x;
unsigned int thread_id = threadIdx.x;
unsigned int block_id = blockIdx.x;
unsigned int block_start = block_id * block_size * 2 + thread_id;
for (unsigned int stride = block_size; stride > 0; stride /= 2) {
if (thread_id < stride && block_start + stride < n) {
input[block_start] += input[block_start + stride];
}
__syncthreads();
}
if (!thread_id) {
output[block_id] = input[block_start];
}
}
double find_sum_cpu(double *arr_host, unsigned int N) {
double result = 0.0f;
cout<<"N : "<<N<<endl;
for(unsigned int i = 0; i < N; i++) {
result += arr_host[i];
}
return result;
}
void find_sum(double *arr_host, unsigned int N) {
// ----------------------------------------- CPU Code -------------------------------------------------
auto startCPU = high_resolution_clock::now();
double result = find_sum_cpu(arr_host, N);
auto stopCPU = high_resolution_clock::now();
cout<<"\n\n--------------- CPU ---------------\n"<<endl;
cout<<"Answer CPU : "<<result<<endl;
cout<<"\nTime on CPU : "<<duration_cast<microseconds>(stopCPU - startCPU).count()/1000<<" milli seconds\n\n"<<endl;
// ----------------------------------------- GPU Code -------------------------------------------------
} | 9e53064fca793b17618a65e193bfd044749691ae.cu | #include "assignmentHPC1.cuh"
#include <iostream>
#include <cstdlib>
#include <chrono>
using namespace std;
using namespace std::chrono;
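// reduce(): a plain global-memory tree reduction. Each block owns a window of
// 2 * blockDim.x consecutive elements of `input`; on each pass, threads with
// thread_id < stride add the element `stride` positions above their own, halving
// the active region until the block's sum sits at the window start, which thread 0
// then writes to output[blockIdx.x]. The per-block partial sums still need to be
// combined by the caller.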
__global__ void reduce(float *input, float *output, unsigned int n) {
unsigned int block_size = blockDim.x;
unsigned int thread_id = threadIdx.x;
unsigned int block_id = blockIdx.x;
unsigned int block_start = block_id * block_size * 2 + thread_id;
for (unsigned int stride = block_size; stride > 0; stride /= 2) {
if (thread_id < stride && block_start + stride < n) {
input[block_start] += input[block_start + stride];
}
__syncthreads();
}
if (!thread_id) {
output[block_id] = input[block_start];
}
}
double find_sum_cpu(double *arr_host, unsigned int N) {
double result = 0.0f;
cout<<"N : "<<N<<endl;
for(unsigned int i = 0; i < N; i++) {
result += arr_host[i];
}
return result;
}
void find_sum(double *arr_host, unsigned int N) {
// ----------------------------------------- CPU Code -------------------------------------------------
auto startCPU = high_resolution_clock::now();
double result = find_sum_cpu(arr_host, N);
auto stopCPU = high_resolution_clock::now();
cout<<"\n\n--------------- CPU ---------------\n"<<endl;
cout<<"Answer CPU : "<<result<<endl;
cout<<"\nTime on CPU : "<<duration_cast<microseconds>(stopCPU - startCPU).count()/1000<<" milli seconds\n\n"<<endl;
// ----------------------------------------- GPU Code -------------------------------------------------
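// The GPU path is missing in the original file; the block below is a hedged
// sketch (not part of the original assignment) of how the reduce() kernel above
// could be driven. The block size of 256, the narrowing of the double input to
// float (reduce() only accepts float*), and the final host-side sum of the
// per-block partials are all assumptions made for illustration.
unsigned int block = 256;
unsigned int grid = (N + 2 * block - 1) / (2 * block); // one block per 2*block input elements
float *h_in = new float[N];
for (unsigned int i = 0; i < N; i++) {
h_in[i] = static_cast<float>(arr_host[i]); // narrow double -> float for the kernel
}
float *d_in = NULL, *d_out = NULL;
cudaMalloc((void**)&d_in, N * sizeof(float));
cudaMalloc((void**)&d_out, grid * sizeof(float));
cudaMemcpy(d_in, h_in, N * sizeof(float), cudaMemcpyHostToDevice);
auto startGPU = high_resolution_clock::now();
reduce<<<grid, block>>>(d_in, d_out, N); // per-block partial sums, computed in place
cudaDeviceSynchronize();
auto stopGPU = high_resolution_clock::now();
float *partials = new float[grid];
cudaMemcpy(partials, d_out, grid * sizeof(float), cudaMemcpyDeviceToHost);
double resultGPU = 0.0;
for (unsigned int i = 0; i < grid; i++) {
resultGPU += partials[i]; // combine the per-block partials on the host
}
cout<<"--------------- GPU (sketch) ---------------\n"<<endl;
cout<<"Answer GPU : "<<resultGPU<<endl;
cout<<"Time on GPU : "<<duration_cast<microseconds>(stopGPU - startGPU).count()/1000<<" milli seconds\n"<<endl;
cudaFree(d_in);
cudaFree(d_out);
delete[] h_in;
delete[] partials;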
} |
703bca904894cd6bc476c6136677372dc78ac16e.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "CudaUtils.h"
#include "LZ4CompressionKernels.h"
#include "TempSpaceBroker.h"
#include "common.h"
#ifdef __GNUC__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Weffc++"
#pragma GCC diagnostic ignored "-Wunused-parameter"
#endif
#include <hipcub/hipcub.hpp>
#ifdef __GNUC__
#pragma GCC diagnostic pop
#endif
#include "hip/hip_runtime.h"
#include <cassert>
#include <fstream>
#include <iostream>
#include <vector>
using offset_type = uint16_t;
using word_type = uint32_t;
using position_type = size_t;
using double_word_type = uint64_t;
using item_type = uint32_t;
namespace nvcomp {
constexpr const int COMP_THREADS = 32;
constexpr const int DECOMP_THREADS = 32;
constexpr const int Y_DIM = 2;
constexpr const position_type BUFFER_SIZE
= DECOMP_THREADS * sizeof(double_word_type);
constexpr const position_type PREFETCH_DIST = BUFFER_SIZE / 2;
constexpr const position_type HASH_TABLE_SIZE = 1U << 14;
constexpr const offset_type NULL_OFFSET = static_cast<offset_type>(-1);
constexpr const position_type MAX_OFFSET = (1U << 16) - 1;
constexpr const size_t MIN_CHUNK_SIZE = sizeof(offset_type) * HASH_TABLE_SIZE;
constexpr const size_t MAX_CHUNK_SIZE = 1U << 24; // 16 MB
// ideally this would fit in a quad-word -- right now though it spills into
// 24-bytes (instead of 16-bytes).
struct chunk_header
{
const uint8_t* src;
uint8_t* dst;
uint32_t size;
};
struct compression_chunk_header
{
const uint8_t* src;
uint8_t* dst;
offset_type* hash;
size_t* comp_size;
uint32_t size;
};
/******************************************************************************
* DEVICE FUNCTIONS AND KERNELS ***********************************************
*****************************************************************************/
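// Upper bound on the compressed size of a `size`-byte stream (worst case for
// incompressible data), rounded up to a size_t boundary.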
inline __device__ __host__ size_t maxSizeOfStream(const size_t size)
{
const size_t expansion = size + 1 + roundUpDiv(size, 255);
return roundUpTo(expansion, sizeof(size_t));
}
inline __device__ void syncCTA()
{
if (DECOMP_THREADS > 32) {
__syncthreads();
} else {
__syncwarp();
}
}
inline __device__ int warpBallot(int vote)
{
return __ballot_sync(0xffffffff, vote);
}
template <typename T>
inline __device__ int warpMatchAny(const int participants, T val)
{
#if __CUDA_ARCH__ >= 700
return __match_any_sync(participants, val);
#else
int mask = 0;
// full search
assert(blockDim.x == 32);
for (int d = 1; d < 32; ++d) {
const int nbr_id = (threadIdx.x + d) & 31;
mask |= (val == __shfl_sync(participants, val, nbr_id)) << nbr_id;
}
return mask;
#endif
}
template <typename T>
inline __device__ void writeWord(uint8_t* const address, const T word)
{
#pragma unroll
for (size_t i = 0; i < sizeof(T); ++i) {
address[i] = static_cast<uint8_t>((word >> (8 * i)) & 0xff);
}
}
template <typename T>
inline __device__ T readWord(const uint8_t* const address)
{
T word = 0;
for (size_t i = 0; i < sizeof(T); ++i) {
word |= address[i] << (8 * i);
}
return word;
}
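// Cooperatively emits an LZ4 length continuation (LSIC): the value is encoded as a
// run of 0xff bytes followed by a final remainder byte, written in parallel by the
// threads of the block.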
template<int BLOCK_SIZE>
inline __device__ void writeLSIC(uint8_t* const out, const position_type number)
{
assert(BLOCK_SIZE == blockDim.x);
const position_type num = (number / 0xffu) + 1;
const uint8_t leftOver = number % 0xffu;
for (size_t i = threadIdx.x; i < num; i += BLOCK_SIZE) {
const uint8_t val = i + 1 < num ? 0xffu : leftOver;
out[i] = val;
}
}
struct token_type
{
position_type num_literals;
position_type num_matches;
__device__ bool hasNumLiteralsOverflow() const
{
return num_literals >= 15;
}
__device__ bool hasNumMatchesOverflow() const
{
return num_matches >= 19;
}
__device__ position_type numLiteralsOverflow() const
{
if (hasNumLiteralsOverflow()) {
return num_literals - 15;
} else {
return 0;
}
}
__device__ uint8_t numLiteralsForHeader() const
{
if (hasNumLiteralsOverflow()) {
return 15;
} else {
return num_literals;
}
}
__device__ position_type numMatchesOverflow() const
{
if (hasNumMatchesOverflow()) {
assert(num_matches >= 19);
return num_matches - 19;
} else {
assert(num_matches < 19);
return 0;
}
}
__device__ uint8_t numMatchesForHeader() const
{
if (hasNumMatchesOverflow()) {
return 15;
} else {
return num_matches - 4;
}
}
__device__ position_type lengthOfLiteralEncoding() const
{
if (hasNumLiteralsOverflow()) {
const position_type num = numLiteralsOverflow();
const position_type length = (num / 0xff) + 1;
return length;
}
return 0;
}
__device__ position_type lengthOfMatchEncoding() const
{
if (hasNumMatchesOverflow()) {
const position_type num = numMatchesOverflow();
const position_type length = (num / 0xff) + 1;
return length;
}
return 0;
}
};
class BufferControl
{
public:
__device__ BufferControl(
uint8_t* const buffer, const uint8_t* const compData, const position_type length) :
m_offset(0),
m_length(length),
m_buffer(buffer),
m_compData(compData)
{
// do nothing
}
#ifdef WARP_READ_LSIC
// this is currently unused as it's slower
inline __device__ position_type queryLSIC(const position_type idx) const
{
if (idx + DECOMP_THREADS <= end()) {
// most likely case
const uint8_t byte = rawAt(idx)[threadIdx.x];
uint32_t mask = warpBallot(byte != 0xff);
mask = __brev(mask);
const position_type fullBytes = __clz(mask);
if (fullBytes < DECOMP_THREADS) {
return fullBytes * 0xff + rawAt(idx)[fullBytes];
} else {
return DECOMP_THREADS * 0xff;
}
} else {
uint8_t byte;
if (idx + threadIdx.x < end()) {
byte = rawAt(idx)[threadIdx.x];
} else {
byte = m_compData[idx + threadIdx.x];
}
uint32_t mask = warpBallot(byte != 0xff);
mask = __brev(mask);
const position_type fullBytes = __clz(mask);
if (fullBytes < DECOMP_THREADS) {
return fullBytes * 0xff + __shfl_sync(0xffffffff, byte, fullBytes);
} else {
return DECOMP_THREADS * 0xff;
}
}
}
#endif
inline __device__ position_type readLSIC(position_type& idx) const
{
#ifdef WARP_READ_LSIC
position_type num = 0;
while (true) {
const position_type block = queryLSIC(idx);
num += block;
if (block < DECOMP_THREADS * 0xff) {
idx += (block / 0xff) + 1;
break;
} else {
idx += DECOMP_THREADS;
}
}
return num;
#else
position_type num = 0;
uint8_t next = 0xff;
// read from the buffer
while (next == 0xff && idx < end()) {
next = rawAt(idx)[0];
++idx;
num += next;
}
// read from global memory
while (next == 0xff) {
next = m_compData[idx];
++idx;
num += next;
}
return num;
#endif
}
inline __device__ const uint8_t* raw() const
{
return m_buffer;
}
inline __device__ const uint8_t* rawAt(const position_type i) const
{
return raw() + (i - begin());
}
inline __device__ uint8_t operator[](const position_type i) const
{
if (i >= m_offset && i - m_offset < BUFFER_SIZE) {
return m_buffer[i - m_offset];
} else {
return m_compData[i];
}
}
inline __device__ void setAndAlignOffset(const position_type offset)
{
static_assert(
sizeof(size_t) == sizeof(const uint8_t*),
"Size of pointer must be equal to size_t.");
const uint8_t* const alignedPtr = reinterpret_cast<const uint8_t*>(
(reinterpret_cast<size_t>(m_compData + offset)
/ sizeof(double_word_type))
* sizeof(double_word_type));
m_offset = alignedPtr - m_compData;
}
inline __device__ void loadAt(const position_type offset)
{
setAndAlignOffset(offset);
if (m_offset + BUFFER_SIZE <= m_length) {
assert(
reinterpret_cast<size_t>(m_compData + m_offset)
% sizeof(double_word_type)
== 0);
assert(BUFFER_SIZE == DECOMP_THREADS * sizeof(double_word_type));
const double_word_type* const word_data
= reinterpret_cast<const double_word_type*>(m_compData + m_offset);
double_word_type* const word_buffer
= reinterpret_cast<double_word_type*>(m_buffer);
word_buffer[threadIdx.x] = word_data[threadIdx.x];
} else {
#pragma unroll
for (int i = threadIdx.x; i < BUFFER_SIZE; i += DECOMP_THREADS) {
if (m_offset + i < m_length) {
m_buffer[i] = m_compData[m_offset + i];
}
}
}
syncCTA();
}
inline __device__ position_type begin() const
{
return m_offset;
}
inline __device__ position_type end() const
{
return m_offset + BUFFER_SIZE;
}
private:
position_type m_offset;
const position_type m_length;
uint8_t* const m_buffer;
const uint8_t* const m_compData;
}; //End BufferControl Class
inline __device__ void coopCopyNoOverlap(
uint8_t* const dest, const uint8_t* const source, const size_t length)
{
for (size_t i = threadIdx.x; i < length; i += blockDim.x) {
dest[i] = source[i];
}
}
inline __device__ void coopCopyRepeat(
uint8_t* const dest,
const uint8_t* const source,
const position_type dist,
const position_type length)
{
// if there is overlap, it means we repeat, so we just
// need to organize our copy around that
for (position_type i = threadIdx.x; i < length; i += blockDim.x) {
dest[i] = source[i % dist];
}
}
inline __device__ void coopCopyOverlap(
uint8_t* const dest,
const uint8_t* const source,
const position_type dist,
const position_type length)
{
if (dist < length) {
coopCopyRepeat(dest, source, dist, length);
} else {
coopCopyNoOverlap(dest, source, length);
}
}
inline __device__ position_type hash(const word_type key)
{
// needs to fit within HASH_TABLE_SIZE (14 bits)
return (__brev(key) + (key^0xc375)) & (HASH_TABLE_SIZE - 1);
}
inline __device__ uint8_t encodePair(const uint8_t t1, const uint8_t t2)
{
return ((t1 & 0x0f) << 4) | (t2 & 0x0f);
}
inline __device__ token_type decodePair(const uint8_t num)
{
return token_type{static_cast<uint8_t>((num & 0xf0) >> 4),
static_cast<uint8_t>(num & 0x0f)};
}
template<int BLOCK_SIZE>
inline __device__ void copyLiterals(
uint8_t* const dest, const uint8_t* const source, const size_t length)
{
assert(BLOCK_SIZE == blockDim.x);
for (size_t i = threadIdx.x; i < length; i += BLOCK_SIZE) {
dest[i] = source[i];
}
}
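// Warp-cooperative match-length scan: threads compare bytes at the previous and the
// candidate match location in parallel and ballot to find the first mismatch; matches
// are never extended into the last 5 bytes of the input (LZ4 end-of-block rule).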
inline __device__ position_type lengthOfMatch(
const uint8_t* const data,
const position_type prev_location,
const position_type next_location,
const position_type length)
{
assert(prev_location < next_location);
position_type match_length = length - next_location - 5;
for (position_type j = 0; j + next_location + 5 < length; j += blockDim.x) {
const position_type i = threadIdx.x + j;
int match = i + next_location + 5 < length
? (data[prev_location + i] != data[next_location + i])
: 1;
match = warpBallot(match);
if (match) {
match_length = j + __clz(__brev(match));
break;
}
}
return match_length;
}
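// Reconstructs the absolute position encoded by a 16-bit hash-table entry: the entry
// stores the low 16 bits of the original position, so take the most recent position
// strictly before `pos` whose low 16 bits equal `offset`.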
inline __device__ position_type
convertIdx(const offset_type offset, const position_type pos)
{
constexpr const position_type OFFSET_SIZE = MAX_OFFSET + 1;
assert(offset <= pos);
position_type realPos = (pos / OFFSET_SIZE) * OFFSET_SIZE + offset;
if (realPos >= pos) {
realPos -= OFFSET_SIZE;
}
assert(realPos < pos);
return realPos;
}
inline __device__ bool isValidHash(
const uint8_t* const data,
const offset_type* const hashTable,
const position_type key,
const position_type hashPos,
const position_type decomp_idx)
{
const position_type hashed_offset = hashTable[hashPos];
if (hashed_offset == NULL_OFFSET) {
return false;
}
const position_type offset = convertIdx(hashed_offset, decomp_idx);
if (decomp_idx - offset > MAX_OFFSET) {
// can't match current position, ahead, or NULL_OFFSET
return false;
}
const word_type hashKey = readWord<word_type>(data + offset);
if (hashKey != key) {
return false;
}
return true;
}
template<int BLOCK_SIZE>
inline __device__ void writeSequenceData(
uint8_t* const compData,
const uint8_t* const decompData,
const token_type token,
const offset_type offset,
const position_type decomp_idx,
position_type& comp_idx)
{
assert(token.num_matches == 0 || token.num_matches >= 4);
// -> add token
if (threadIdx.x == 0) {
compData[comp_idx]
= encodePair(token.numLiteralsForHeader(), token.numMatchesForHeader());
}
++comp_idx;
// -> add literal length
const position_type literalEncodingLength = token.lengthOfLiteralEncoding();
if (literalEncodingLength) {
writeLSIC<BLOCK_SIZE>(compData + comp_idx, token.numLiteralsOverflow());
comp_idx += literalEncodingLength;
}
// -> add literals
copyLiterals<BLOCK_SIZE>(
compData + comp_idx, decompData + decomp_idx, token.num_literals);
comp_idx += token.num_literals;
// -> add offset
if (token.num_matches > 0) {
assert(offset > 0);
if (threadIdx.x == 0) {
writeWord(compData + comp_idx, offset);
}
comp_idx += sizeof(offset);
// -> add match length
if (token.hasNumMatchesOverflow()) {
writeLSIC<BLOCK_SIZE>(compData + comp_idx, token.numMatchesOverflow());
comp_idx += token.lengthOfMatchEncoding();
}
}
}
inline __device__ int numValidThreadsToMask(
const int numValidThreads)
{
return 0xffffffff >> (32-numValidThreads);
}
inline __device__ void insertHashTableWarp(
offset_type* hashTable,
const offset_type pos,
const word_type next,
const int numValidThreads)
{
position_type hashPos = hash(next);
if (threadIdx.x < numValidThreads) {
const int match = warpMatchAny(numValidThreadsToMask(numValidThreads), hashPos);
if (!match || 31 - __clz(match) == threadIdx.x) {
// I'm the last match -- can insert
hashTable[hashPos] = pos & MAX_OFFSET;
}
}
__syncwarp();
}
__device__ void compressStream(
uint8_t* compData,
const uint8_t* decompData,
offset_type* const hashTable,
const size_t length,
size_t* comp_length)
{
assert(blockDim.x == COMP_THREADS);
static_assert(COMP_THREADS <= 32, "Compression can be done with at "
"most one warp");
position_type decomp_idx = 0;
position_type comp_idx = 0;
for (position_type i = threadIdx.x; i < HASH_TABLE_SIZE; i += COMP_THREADS) {
hashTable[i] = NULL_OFFSET;
}
__syncwarp();
while (decomp_idx < length) {
const position_type tokenStart = decomp_idx;
while (true) {
if (decomp_idx + 5 + 4 >= length) {
// jump to end
decomp_idx = length;
// no match -- literals to the end
token_type tok;
tok.num_literals = length - tokenStart;
tok.num_matches = 0;
writeSequenceData<COMP_THREADS>(compData, decompData, tok, 0, tokenStart, comp_idx);
break;
}
// begin adding tokens to the hash table until we find a match
uint8_t byte = 0;
if (decomp_idx + 5 + threadIdx.x < length) {
byte = decompData[decomp_idx + threadIdx.x];
}
// each thread needs a four byte word, but only separated by a byte e.g.:
// for two threads, the five bytes [ 0x12 0x34 0x56 0x78 0x9a ] would
// be assigned as [0x78563412 0x9a785634 ] to the two threads
// (little-endian). That means when reading 32 bytes, we can only fill
// the first 29 thread's 4-byte words.
word_type next = byte;
// collect second byte
next |= __shfl_down_sync(0xffffffff, byte, 1) << 8;
// collect third and fourth bytes
next |= __shfl_down_sync(0xffffffff, next, 2) << 16;
// since we do not have valid data for the last 3 threads (or more if
// we're at the end of the data), mark them as inactive.
const int numValidThreads
= min(static_cast<size_t>(COMP_THREADS - 3), length - decomp_idx - 9);
// first try to find a local match
position_type match_location = length;
int match_mask_self = 0;
if (threadIdx.x < numValidThreads) {
match_mask_self = warpMatchAny(numValidThreadsToMask(numValidThreads), next);
}
// each thread has a mask of other threads with matches, next we need
// to find the first thread with a match before it
const int match_mask_warp = warpBallot(
match_mask_self && __clz(__brev(match_mask_self)) != threadIdx.x);
int first_match_thread;
if (match_mask_warp) {
// find the byte offset (thread id) within the warp where the first
// match is located
first_match_thread = __clz(__brev(match_mask_warp));
// determine the global position for the finding thread
match_location = __clz(__brev(match_mask_self)) + decomp_idx;
// communicate the global position of the match to other threads
match_location = __shfl_sync(0xffffffff, match_location, first_match_thread);
} else {
first_match_thread = COMP_THREADS;
}
// only go to the hash table if there is a possibility of finding an
// earlier match
if (first_match_thread > 0) {
// go to hash table for an earlier match
position_type hashPos = hash(next);
const int match_found = threadIdx.x < numValidThreads
? isValidHash(
decompData,
hashTable,
next,
hashPos,
decomp_idx + threadIdx.x)
: 0;
// determine the first thread to find a match
const int match = warpBallot(match_found);
const int candidate_first_match_thread = __clz(__brev(match));
assert(candidate_first_match_thread != threadIdx.x || match_found);
assert(!match_found || candidate_first_match_thread <= threadIdx.x);
if (candidate_first_match_thread < first_match_thread) {
// if we found a valid match, and it occurs before a previously found
// match, use that
first_match_thread = candidate_first_match_thread;
hashPos = __shfl_sync(0xffffffff, hashPos, first_match_thread);
match_location
= convertIdx(hashTable[hashPos], decomp_idx + first_match_thread);
}
}
if (match_location != length) {
// insert up to the match into the hash table
insertHashTableWarp(
hashTable, decomp_idx + threadIdx.x, next, first_match_thread);
const position_type pos = decomp_idx + first_match_thread;
assert(match_location < pos);
assert(pos - match_location <= MAX_OFFSET);
// we found a match
const offset_type match_offset = pos - match_location;
assert(match_offset > 0);
assert(match_offset <= pos);
const position_type num_literals = pos - tokenStart;
// compute match length
const position_type num_matches
= lengthOfMatch(decompData, match_location, pos, length);
// -> write our token and literal length
token_type tok;
tok.num_literals = num_literals;
tok.num_matches = num_matches;
// update our position
decomp_idx = tokenStart + num_matches + num_literals;
// insert only the literals into the hash table
writeSequenceData<COMP_THREADS>(
compData, decompData, tok, match_offset, tokenStart, comp_idx);
break;
}
// insert everything into hash table
insertHashTableWarp(
hashTable, decomp_idx + threadIdx.x, next, numValidThreads);
decomp_idx += numValidThreads;
}
}
if (threadIdx.x == 0) {
*comp_length = comp_idx;
}
}
inline __device__ void decompressStream(
uint8_t* buffer,
uint8_t* decompData,
const uint8_t* compData,
position_type length)
{
position_type comp_end = length;
BufferControl ctrl(buffer, compData, comp_end);
ctrl.loadAt(0);
position_type decomp_idx = 0;
position_type comp_idx = 0;
while (comp_idx < comp_end) {
if (comp_idx + PREFETCH_DIST > ctrl.end()) {
ctrl.loadAt(comp_idx);
}
// read header byte
token_type tok = decodePair(*ctrl.rawAt(comp_idx));
++comp_idx;
// read the length of the literals
position_type num_literals = tok.num_literals;
if (tok.num_literals == 15) {
num_literals += ctrl.readLSIC(comp_idx);
}
const position_type literalStart = comp_idx;
// copy the literals to the out stream
if (num_literals + comp_idx > ctrl.end()) {
coopCopyNoOverlap(
decompData + decomp_idx, compData + comp_idx, num_literals);
} else {
// our buffer can copy
coopCopyNoOverlap(
decompData + decomp_idx, ctrl.rawAt(comp_idx), num_literals);
}
comp_idx += num_literals;
decomp_idx += num_literals;
// Note that the last sequence stops right after literals field.
// There are specific parsing rules to respect to be compatible with the
// reference decoder : 1) The last 5 bytes are always literals 2) The last
// match cannot start within the last 12 bytes Consequently, a file with
// less than 13 bytes can only be represented as literals. These rules are in
// place to benefit speed and ensure buffer limits are never crossed.
if (comp_idx < comp_end) {
// read the offset
offset_type offset;
if (comp_idx + sizeof(offset_type) > ctrl.end()) {
offset = readWord<offset_type>(compData + comp_idx);
} else {
offset = readWord<offset_type>(ctrl.rawAt(comp_idx));
}
comp_idx += sizeof(offset_type);
// read the match length
position_type match = 4 + tok.num_matches;
if (tok.num_matches == 15) {
match += ctrl.readLSIC(comp_idx);
}
// copy match
if (offset <= num_literals
&& (ctrl.begin() <= literalStart
&& ctrl.end() >= literalStart + num_literals)) {
// we are using literals already present in our buffer
coopCopyOverlap(
decompData + decomp_idx,
ctrl.rawAt(literalStart + (num_literals - offset)),
offset,
match);
// we need to sync after we copy since we use the buffer
syncCTA();
} else {
// we need to sync before we copy since we use decomp
syncCTA();
coopCopyOverlap(
decompData + decomp_idx,
decompData + decomp_idx - offset,
offset,
match);
}
decomp_idx += match;
}
}
assert(comp_idx == comp_end);
}
template <typename T>
struct BlockPrefixCallbackOp
{
T m_running_total;
__device__ BlockPrefixCallbackOp(const T running_total) :
m_running_total(running_total)
{
}
__device__ T operator()(const T block_aggregate)
{
const T old_prefix = m_running_total;
m_running_total += block_aggregate;
return old_prefix;
}
__device__ T total() const
{
return m_running_total;
}
};
template <int BLOCK_SIZE>
inline __device__ void generateItemChunkMappings(
const size_t* const decomp_sizes,
const size_t target_chunk,
const size_t batch_size,
const int max_chunk_size,
item_type& item,
size_t& local_chunk)
{
using BlockScan = typename hipcub::BlockScan<size_t, BLOCK_SIZE>;
// each thread is assigned a chunk, and they cooperatively prefix sum
// the items, and then write out their chunk's item. The first thread
// assigned to any given item also writes out its prefix
__shared__ typename BlockScan::TempStorage temp_space;
__shared__ size_t local_prefix[BLOCK_SIZE + 1];
BlockPrefixCallbackOp<size_t> prefix_op(0);
item = static_cast<item_type>(-1);
// we have one thread per item computing the prefix sum
for (size_t item_start = 0; item_start < batch_size;
item_start += BLOCK_SIZE) {
const size_t i = item_start + threadIdx.x;
const size_t item_chunks
= i < batch_size ? roundUpDiv(decomp_sizes[i], max_chunk_size) : 0;
BlockScan(temp_space)
.ExclusiveSum(item_chunks, local_prefix[threadIdx.x], prefix_op);
if (threadIdx.x == 0) {
local_prefix[BLOCK_SIZE] = prefix_op.total();
}
__syncthreads();
// if a thread's chunk lies in this set of items
if (target_chunk >= local_prefix[0]
&& target_chunk < local_prefix[BLOCK_SIZE]) {
int beg = item_start;
int end = min(item_start + BLOCK_SIZE, batch_size) - 1;
// Binary search for the right chunk -- we know it exists, so we don't
// have to handle cases of before or after the sequence. We find the
// first index the target chunk is less than
item = (end + beg) / 2;
while (beg < end) {
const size_t chunk = local_prefix[item + 1 - item_start];
if (chunk <= target_chunk) {
assert(beg != chunk);
// the current mid-point works as a lower bound
beg = item + 1;
} else {
// the current mid-point does not work as a lower bound, so it must
// work as an upper bound
end = item;
}
item = (end + beg) / 2;
}
// the target for this thread is here
local_chunk = target_chunk - local_prefix[item - item_start];
}
__syncthreads();
}
}
template <int BLOCK_SIZE>
__global__ void lz4CompressGenerateHeaders(
const uint8_t* const* const decomp_data,
const size_t* const decomp_sizes,
uint8_t* const comp_data,
size_t* const* const comp_sizes,
const size_t batch_size,
const int max_chunk_size,
const size_t total_chunks,
const size_t* const scratch_space_offset,
uint8_t* const* const scratch_space,
compression_chunk_header* const headers,
size_t* const item_prefix,
item_type* const item_map)
{
const size_t target_chunk = BLOCK_SIZE * blockIdx.x + threadIdx.x;
item_type item;
size_t local_chunk;
generateItemChunkMappings<BLOCK_SIZE>(
decomp_sizes,
target_chunk,
batch_size,
max_chunk_size,
item,
local_chunk);
// write out items and chunk id's
if (target_chunk < total_chunks) {
if (local_chunk == 0) {
item_prefix[item] = target_chunk;
}
assert(item < batch_size);
item_map[target_chunk] = item;
const size_t chunk_offset
= local_chunk * static_cast<size_t>(max_chunk_size);
const size_t chunk_end = chunk_offset + max_chunk_size;
const size_t comp_offset = maxSizeOfStream(max_chunk_size) * target_chunk;
compression_chunk_header h;
h.src = decomp_data[item] + chunk_offset;
h.dst = comp_data + comp_offset;
h.hash = reinterpret_cast<offset_type*>(
scratch_space[item] + scratch_space_offset[item]
+ (HASH_TABLE_SIZE * sizeof(offset_type)) * local_chunk);
h.comp_size = comp_sizes[item] + local_chunk;
h.size = min(chunk_end, decomp_sizes[item]) - chunk_offset;
headers[target_chunk] = h;
}
}
__global__ void
lz4CompressMultistreamKernel(const compression_chunk_header* const headers)
{
const int bidx = blockIdx.x * blockDim.y + threadIdx.y;
const uint8_t* decomp_ptr = headers[bidx].src;
const size_t decomp_length = headers[bidx].size;
uint8_t* comp_ptr = headers[bidx].dst;
size_t* const comp_length = headers[bidx].comp_size;
compressStream(
comp_ptr, decomp_ptr, headers[bidx].hash, decomp_length, comp_length);
}
template <int BLOCK_SIZE>
__global__ void lz4CompressSumSizes(
size_t* const* const sizes,
const size_t* const offsets,
const size_t* const decomp_sizes,
const size_t chunk_size)
{
using BlockScan = typename hipcub::BlockScan<size_t, BLOCK_SIZE>;
__shared__ typename BlockScan::TempStorage temp_space;
BlockPrefixCallbackOp<size_t> prefix_op(offsets[blockIdx.x]);
const size_t num = roundUpDiv(decomp_sizes[blockIdx.x], chunk_size);
size_t size = 0;
for (size_t i = 0; i < num; i += BLOCK_SIZE) {
const size_t index = i + threadIdx.x;
if (index < num) {
size = sizes[blockIdx.x][index];
} else {
size = 0;
}
BlockScan(temp_space).ExclusiveSum(size, size, prefix_op);
if (index < num) {
sizes[blockIdx.x][index] = size;
}
__syncthreads();
}
if (threadIdx.x == 0) {
sizes[blockIdx.x][num] = prefix_op.total();
}
}
template <int BLOCK_SIZE>
__global__ void copyToContig(
const item_type* const item_map,
const size_t* const item_prefix,
const uint8_t* const temp_data,
const int stride,
const size_t* const* const comp_prefix,
uint8_t* const* const comp_data)
{
const size_t global_chunk = blockIdx.x;
const size_t item = item_map[global_chunk];
// we assume there are no empty items
assert(item <= global_chunk);
const size_t local_chunk = global_chunk - item_prefix[item];
const size_t offset = comp_prefix[item][local_chunk];
const size_t size = comp_prefix[item][local_chunk + 1] - offset;
for (size_t i = threadIdx.x; i < size; i += BLOCK_SIZE) {
comp_data[item][offset + i] = temp_data[stride * global_chunk + i];
}
}
__global__ void lz4DecompressMultistreamKernel(
const chunk_header* const headers, const int num_chunks)
{
const int bid = blockIdx.x * Y_DIM + threadIdx.y;
__shared__ uint8_t buffer[BUFFER_SIZE * Y_DIM];
if (bid < num_chunks) {
uint8_t* const decomp_ptr = headers[bid].dst;
const uint8_t* const comp_ptr = headers[bid].src;
const size_t chunk_length = headers[bid].size;
decompressStream(
buffer + threadIdx.y * BUFFER_SIZE, decomp_ptr, comp_ptr, chunk_length);
}
}
__global__ void lz4DecompressGenerateHeaders(
uint8_t* const decomp_data,
const uint8_t* const comp_data,
const size_t* const comp_chunk_prefix,
const size_t decomp_chunk_size,
const size_t num_chunks,
chunk_header* const headers)
{
const int chunk = threadIdx.x + blockIdx.x * blockDim.x;
if (chunk < num_chunks) {
const size_t comp_chunk_offset = comp_chunk_prefix[chunk];
const size_t decomp_chunk_offset = chunk * decomp_chunk_size;
chunk_header h;
h.src = comp_data + comp_chunk_offset;
h.dst = decomp_data + decomp_chunk_offset;
h.size = comp_chunk_prefix[chunk + 1] - comp_chunk_prefix[chunk];
headers[chunk] = h;
}
}
/******************************************************************************
* PUBLIC FUNCTIONS ***********************************************************
*****************************************************************************/
void lz4CompressBatch(
const uint8_t* const* const decomp_data_device,
const size_t* const decomp_prefixes_device,
const size_t* const decomp_sizes_host,
const size_t batch_size,
const size_t max_chunk_size,
uint8_t* const temp_data_device,
const size_t temp_bytes,
uint8_t* const* const comp_data_device,
size_t* const* const comp_prefixes_device,
const size_t* const comp_prefix_offset_device,
hipStream_t stream)
{
if (max_chunk_size < lz4MinChunkSize()) {
throw std::runtime_error(
"Minimum chunk size for LZ4 is " + std::to_string(MIN_CHUNK_SIZE));
} else if (max_chunk_size > lz4MaxChunkSize()) {
throw std::runtime_error(
"Maximum chunk size for LZ4 is " + std::to_string(MAX_CHUNK_SIZE));
}
// most of the kernels take a negligible amount of time, so by default we
// just use 128 threads. This value, however, is chosen arbitrarily and
// has not been tuned for any architecture or dataset size.
constexpr const int BLOCK_SIZE = 128;
const size_t stride = lz4ComputeMaxSize(max_chunk_size);
const size_t chunks_in_batch
= lz4ComputeChunksInBatch(decomp_sizes_host, batch_size, max_chunk_size);
TempSpaceBroker broker(temp_data_device, temp_bytes);
uint8_t* staging_space;
broker.reserve(&staging_space, chunks_in_batch * stride);
compression_chunk_header* headers;
broker.reserve(&headers, chunks_in_batch);
// look up starting chunk per item
size_t* item_prefix;
broker.reserve(&item_prefix, batch_size);
// look up item per chunk
item_type* item_map;
broker.reserve(&item_map, chunks_in_batch);
// setup headers
{
const dim3 grid(roundUpDiv(chunks_in_batch, BLOCK_SIZE));
const dim3 block(BLOCK_SIZE);
hipLaunchKernelGGL(( lz4CompressGenerateHeaders<BLOCK_SIZE>), dim3(grid), dim3(block), 0, stream,
decomp_data_device,
decomp_prefixes_device,
staging_space,
comp_prefixes_device,
batch_size,
max_chunk_size,
chunks_in_batch,
comp_prefix_offset_device,
comp_data_device,
headers,
item_prefix,
item_map);
CudaUtils::check_last_error();
}
// perform compression
{
const dim3 grid(chunks_in_batch);
const dim3 block(COMP_THREADS);
hipLaunchKernelGGL(( lz4CompressMultistreamKernel), dim3(grid), dim3(block), 0, stream, headers);
CudaUtils::check_last_error();
}
// perform prefix sum
{
const dim3 grid(batch_size);
const dim3 block(BLOCK_SIZE);
hipLaunchKernelGGL(( lz4CompressSumSizes<BLOCK_SIZE>), dim3(grid), dim3(block), 0, stream,
comp_prefixes_device,
comp_prefix_offset_device,
decomp_prefixes_device,
max_chunk_size);
CudaUtils::check_last_error();
}
{
const dim3 grid(chunks_in_batch);
// Since we are copying a whole chunk per thread block, maximize the number
// of threads we have copying each block
const dim3 block(1024);
// Copy prefix sums values to metadata header and copy compressed data into
// contiguous space
hipLaunchKernelGGL(( copyToContig<1024>), dim3(grid), dim3(block), 0, stream,
item_map,
item_prefix,
staging_space,
stride,
comp_prefixes_device,
comp_data_device);
CudaUtils::check_last_error();
}
}
void lz4DecompressBatches(
void* const temp_space,
const size_t temp_size,
void* const* decompData,
const uint8_t* const* compData,
int batch_size,
const size_t** compPrefix,
int chunk_size,
int* chunks_in_item,
hipStream_t stream)
{
TempSpaceBroker broker(temp_space, temp_size);
int total_chunks=0;
for(int i=0; i<batch_size; i++) {
total_chunks += chunks_in_item[i];
}
chunk_header* headers;
broker.reserve(&headers, total_chunks);
int chunk_start=0;
for(int i=0; i<batch_size; i++) {
const dim3 header_block(128);
const dim3 header_grid(roundUpDiv(chunks_in_item[i], header_block.x));
hipLaunchKernelGGL(( lz4DecompressGenerateHeaders), dim3(header_grid), dim3(header_block), 0, stream,
static_cast<uint8_t*>(decompData[i]),
compData[i],
compPrefix[i],
chunk_size,
chunks_in_item[i],
&headers[chunk_start]);
chunk_start += chunks_in_item[i];
}
hipLaunchKernelGGL(( lz4DecompressMultistreamKernel),
dim3(roundUpDiv(total_chunks, Y_DIM)),
dim3(dim3(DECOMP_THREADS, Y_DIM, 1)),
0,
stream, headers, total_chunks);
}
size_t lz4ComputeChunksInBatch(
const size_t* const decomp_data_size,
const size_t batch_size,
const size_t chunk_size)
{
size_t num_chunks = 0;
for (size_t i = 0; i < batch_size; ++i) {
num_chunks += roundUpDiv(decomp_data_size[i], chunk_size);
}
return num_chunks;
}
size_t lz4CompressComputeTempSize(
const size_t maxChunksInBatch, const size_t chunkSize)
{
const size_t batch_size = 1;
size_t prefix_temp_size;
hipError_t err = hipcub::DeviceScan::InclusiveSum(
NULL,
prefix_temp_size,
static_cast<const size_t*>(nullptr),
static_cast<size_t*>(nullptr),
maxChunksInBatch + 1);
if (err != hipSuccess) {
throw std::runtime_error(
"Failed to get space for cub inclusive sub: " + std::to_string(err));
}
const size_t strideSize = lz4ComputeMaxSize(chunkSize);
const size_t staging_size
= roundUpTo(strideSize * maxChunksInBatch, sizeof(size_t));
const size_t prefix_out_size = sizeof(size_t) * (maxChunksInBatch + 1);
const size_t header_size = roundUpTo(
sizeof(compression_chunk_header) * maxChunksInBatch, sizeof(size_t));
const size_t map_size
= roundUpTo(sizeof(uint32_t) * maxChunksInBatch, sizeof(size_t));
const size_t prefix_size = sizeof(size_t) * batch_size;
return prefix_temp_size + prefix_out_size + staging_size + header_size
+ map_size + prefix_size;
}
size_t lz4DecompressComputeTempSize(
const size_t maxChunksInBatch, const size_t /* chunkSize */)
{
const size_t header_size = sizeof(chunk_header) * maxChunksInBatch;
return roundUpTo(header_size, sizeof(size_t));
}
size_t lz4ComputeMaxSize(const size_t size)
{
return maxSizeOfStream(size);
}
size_t lz4MinChunkSize()
{
return MIN_CHUNK_SIZE;
}
size_t lz4MaxChunkSize()
{
return MAX_CHUNK_SIZE;
}
} // nvcomp namespace
| 703bca904894cd6bc476c6136677372dc78ac16e.cu | /*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "CudaUtils.h"
#include "LZ4CompressionKernels.h"
#include "TempSpaceBroker.h"
#include "common.h"
#ifdef __GNUC__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Weffc++"
#pragma GCC diagnostic ignored "-Wunused-parameter"
#endif
#include <cub/cub.cuh>
#ifdef __GNUC__
#pragma GCC diagnostic pop
#endif
#include "cuda_runtime.h"
#include <cassert>
#include <fstream>
#include <iostream>
#include <vector>
using offset_type = uint16_t;
using word_type = uint32_t;
using position_type = size_t;
using double_word_type = uint64_t;
using item_type = uint32_t;
namespace nvcomp {
constexpr const int COMP_THREADS = 32;
constexpr const int DECOMP_THREADS = 32;
constexpr const int Y_DIM = 2;
constexpr const position_type BUFFER_SIZE
= DECOMP_THREADS * sizeof(double_word_type);
constexpr const position_type PREFETCH_DIST = BUFFER_SIZE / 2;
constexpr const position_type HASH_TABLE_SIZE = 1U << 14;
constexpr const offset_type NULL_OFFSET = static_cast<offset_type>(-1);
constexpr const position_type MAX_OFFSET = (1U << 16) - 1;
constexpr const size_t MIN_CHUNK_SIZE = sizeof(offset_type) * HASH_TABLE_SIZE;
constexpr const size_t MAX_CHUNK_SIZE = 1U << 24; // 16 MB
// ideally this would fit in a quad-word -- right now though it spills into
// 24-bytes (instead of 16-bytes).
struct chunk_header
{
const uint8_t* src;
uint8_t* dst;
uint32_t size;
};
struct compression_chunk_header
{
const uint8_t* src;
uint8_t* dst;
offset_type* hash;
size_t* comp_size;
uint32_t size;
};
/******************************************************************************
* DEVICE FUNCTIONS AND KERNELS ***********************************************
*****************************************************************************/
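// Upper bound on the compressed size of a `size`-byte stream (worst case for
// incompressible data), rounded up to a size_t boundary.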
inline __device__ __host__ size_t maxSizeOfStream(const size_t size)
{
const size_t expansion = size + 1 + roundUpDiv(size, 255);
return roundUpTo(expansion, sizeof(size_t));
}
inline __device__ void syncCTA()
{
if (DECOMP_THREADS > 32) {
__syncthreads();
} else {
__syncwarp();
}
}
inline __device__ int warpBallot(int vote)
{
return __ballot_sync(0xffffffff, vote);
}
template <typename T>
inline __device__ int warpMatchAny(const int participants, T val)
{
#if __CUDA_ARCH__ >= 700
return __match_any_sync(participants, val);
#else
int mask = 0;
// full search
assert(blockDim.x == 32);
for (int d = 1; d < 32; ++d) {
const int nbr_id = (threadIdx.x + d) & 31;
mask |= (val == __shfl_sync(participants, val, nbr_id)) << nbr_id;
}
return mask;
#endif
}
template <typename T>
inline __device__ void writeWord(uint8_t* const address, const T word)
{
#pragma unroll
for (size_t i = 0; i < sizeof(T); ++i) {
address[i] = static_cast<uint8_t>((word >> (8 * i)) & 0xff);
}
}
template <typename T>
inline __device__ T readWord(const uint8_t* const address)
{
T word = 0;
for (size_t i = 0; i < sizeof(T); ++i) {
word |= address[i] << (8 * i);
}
return word;
}
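// Cooperatively emits an LZ4 length continuation (LSIC): the value is encoded as a
// run of 0xff bytes followed by a final remainder byte, written in parallel by the
// threads of the block.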
template<int BLOCK_SIZE>
inline __device__ void writeLSIC(uint8_t* const out, const position_type number)
{
assert(BLOCK_SIZE == blockDim.x);
const position_type num = (number / 0xffu) + 1;
const uint8_t leftOver = number % 0xffu;
for (size_t i = threadIdx.x; i < num; i += BLOCK_SIZE) {
const uint8_t val = i + 1 < num ? 0xffu : leftOver;
out[i] = val;
}
}
struct token_type
{
position_type num_literals;
position_type num_matches;
__device__ bool hasNumLiteralsOverflow() const
{
return num_literals >= 15;
}
__device__ bool hasNumMatchesOverflow() const
{
return num_matches >= 19;
}
__device__ position_type numLiteralsOverflow() const
{
if (hasNumLiteralsOverflow()) {
return num_literals - 15;
} else {
return 0;
}
}
__device__ uint8_t numLiteralsForHeader() const
{
if (hasNumLiteralsOverflow()) {
return 15;
} else {
return num_literals;
}
}
__device__ position_type numMatchesOverflow() const
{
if (hasNumMatchesOverflow()) {
assert(num_matches >= 19);
return num_matches - 19;
} else {
assert(num_matches < 19);
return 0;
}
}
__device__ uint8_t numMatchesForHeader() const
{
if (hasNumMatchesOverflow()) {
return 15;
} else {
return num_matches - 4;
}
}
__device__ position_type lengthOfLiteralEncoding() const
{
if (hasNumLiteralsOverflow()) {
const position_type num = numLiteralsOverflow();
const position_type length = (num / 0xff) + 1;
return length;
}
return 0;
}
__device__ position_type lengthOfMatchEncoding() const
{
if (hasNumMatchesOverflow()) {
const position_type num = numMatchesOverflow();
const position_type length = (num / 0xff) + 1;
return length;
}
return 0;
}
};
class BufferControl
{
public:
__device__ BufferControl(
uint8_t* const buffer, const uint8_t* const compData, const position_type length) :
m_offset(0),
m_length(length),
m_buffer(buffer),
m_compData(compData)
{
// do nothing
}
#ifdef WARP_READ_LSIC
// this is currently unused as it's slower
inline __device__ position_type queryLSIC(const position_type idx) const
{
if (idx + DECOMP_THREADS <= end()) {
// most likely case
const uint8_t byte = rawAt(idx)[threadIdx.x];
uint32_t mask = warpBallot(byte != 0xff);
mask = __brev(mask);
const position_type fullBytes = __clz(mask);
if (fullBytes < DECOMP_THREADS) {
return fullBytes * 0xff + rawAt(idx)[fullBytes];
} else {
return DECOMP_THREADS * 0xff;
}
} else {
uint8_t byte;
if (idx + threadIdx.x < end()) {
byte = rawAt(idx)[threadIdx.x];
} else {
byte = m_compData[idx + threadIdx.x];
}
uint32_t mask = warpBallot(byte != 0xff);
mask = __brev(mask);
const position_type fullBytes = __clz(mask);
if (fullBytes < DECOMP_THREADS) {
return fullBytes * 0xff + __shfl_sync(0xffffffff, byte, fullBytes);
} else {
return DECOMP_THREADS * 0xff;
}
}
}
#endif
inline __device__ position_type readLSIC(position_type& idx) const
{
#ifdef WARP_READ_LSIC
position_type num = 0;
while (true) {
const position_type block = queryLSIC(idx);
num += block;
if (block < DECOMP_THREADS * 0xff) {
idx += (block / 0xff) + 1;
break;
} else {
idx += DECOMP_THREADS;
}
}
return num;
#else
position_type num = 0;
uint8_t next = 0xff;
// read from the buffer
while (next == 0xff && idx < end()) {
next = rawAt(idx)[0];
++idx;
num += next;
}
// read from global memory
while (next == 0xff) {
next = m_compData[idx];
++idx;
num += next;
}
return num;
#endif
}
inline __device__ const uint8_t* raw() const
{
return m_buffer;
}
inline __device__ const uint8_t* rawAt(const position_type i) const
{
return raw() + (i - begin());
}
inline __device__ uint8_t operator[](const position_type i) const
{
if (i >= m_offset && i - m_offset < BUFFER_SIZE) {
return m_buffer[i - m_offset];
} else {
return m_compData[i];
}
}
inline __device__ void setAndAlignOffset(const position_type offset)
{
static_assert(
sizeof(size_t) == sizeof(const uint8_t*),
"Size of pointer must be equal to size_t.");
const uint8_t* const alignedPtr = reinterpret_cast<const uint8_t*>(
(reinterpret_cast<size_t>(m_compData + offset)
/ sizeof(double_word_type))
* sizeof(double_word_type));
m_offset = alignedPtr - m_compData;
}
inline __device__ void loadAt(const position_type offset)
{
setAndAlignOffset(offset);
if (m_offset + BUFFER_SIZE <= m_length) {
assert(
reinterpret_cast<size_t>(m_compData + m_offset)
% sizeof(double_word_type)
== 0);
assert(BUFFER_SIZE == DECOMP_THREADS * sizeof(double_word_type));
const double_word_type* const word_data
= reinterpret_cast<const double_word_type*>(m_compData + m_offset);
double_word_type* const word_buffer
= reinterpret_cast<double_word_type*>(m_buffer);
word_buffer[threadIdx.x] = word_data[threadIdx.x];
} else {
#pragma unroll
for (int i = threadIdx.x; i < BUFFER_SIZE; i += DECOMP_THREADS) {
if (m_offset + i < m_length) {
m_buffer[i] = m_compData[m_offset + i];
}
}
}
syncCTA();
}
inline __device__ position_type begin() const
{
return m_offset;
}
inline __device__ position_type end() const
{
return m_offset + BUFFER_SIZE;
}
private:
position_type m_offset;
const position_type m_length;
uint8_t* const m_buffer;
const uint8_t* const m_compData;
}; //End BufferControl Class
inline __device__ void coopCopyNoOverlap(
uint8_t* const dest, const uint8_t* const source, const size_t length)
{
for (size_t i = threadIdx.x; i < length; i += blockDim.x) {
dest[i] = source[i];
}
}
inline __device__ void coopCopyRepeat(
uint8_t* const dest,
const uint8_t* const source,
const position_type dist,
const position_type length)
{
// if there is overlap, it means we repeat, so we just
// need to organize our copy around that
for (position_type i = threadIdx.x; i < length; i += blockDim.x) {
dest[i] = source[i % dist];
}
}
inline __device__ void coopCopyOverlap(
uint8_t* const dest,
const uint8_t* const source,
const position_type dist,
const position_type length)
{
if (dist < length) {
coopCopyRepeat(dest, source, dist, length);
} else {
coopCopyNoOverlap(dest, source, length);
}
}
inline __device__ position_type hash(const word_type key)
{
// needs to fit within HASH_TABLE_SIZE (14 bits)
return (__brev(key) + (key^0xc375)) & (HASH_TABLE_SIZE - 1);
}
inline __device__ uint8_t encodePair(const uint8_t t1, const uint8_t t2)
{
return ((t1 & 0x0f) << 4) | (t2 & 0x0f);
}
inline __device__ token_type decodePair(const uint8_t num)
{
return token_type{static_cast<uint8_t>((num & 0xf0) >> 4),
static_cast<uint8_t>(num & 0x0f)};
}
template<int BLOCK_SIZE>
inline __device__ void copyLiterals(
uint8_t* const dest, const uint8_t* const source, const size_t length)
{
assert(BLOCK_SIZE == blockDim.x);
for (size_t i = threadIdx.x; i < length; i += BLOCK_SIZE) {
dest[i] = source[i];
}
}
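// Warp-cooperative match-length scan: threads compare bytes at the previous and the
// candidate match location in parallel and ballot to find the first mismatch; matches
// are never extended into the last 5 bytes of the input (LZ4 end-of-block rule).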
inline __device__ position_type lengthOfMatch(
const uint8_t* const data,
const position_type prev_location,
const position_type next_location,
const position_type length)
{
assert(prev_location < next_location);
position_type match_length = length - next_location - 5;
for (position_type j = 0; j + next_location + 5 < length; j += blockDim.x) {
const position_type i = threadIdx.x + j;
int match = i + next_location + 5 < length
? (data[prev_location + i] != data[next_location + i])
: 1;
match = warpBallot(match);
if (match) {
match_length = j + __clz(__brev(match));
break;
}
}
return match_length;
}
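// Reconstructs the absolute position encoded by a 16-bit hash-table entry: the entry
// stores the low 16 bits of the original position, so take the most recent position
// strictly before `pos` whose low 16 bits equal `offset`.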
inline __device__ position_type
convertIdx(const offset_type offset, const position_type pos)
{
constexpr const position_type OFFSET_SIZE = MAX_OFFSET + 1;
assert(offset <= pos);
position_type realPos = (pos / OFFSET_SIZE) * OFFSET_SIZE + offset;
if (realPos >= pos) {
realPos -= OFFSET_SIZE;
}
assert(realPos < pos);
return realPos;
}
inline __device__ bool isValidHash(
const uint8_t* const data,
const offset_type* const hashTable,
const position_type key,
const position_type hashPos,
const position_type decomp_idx)
{
const position_type hashed_offset = hashTable[hashPos];
if (hashed_offset == NULL_OFFSET) {
return false;
}
const position_type offset = convertIdx(hashed_offset, decomp_idx);
if (decomp_idx - offset > MAX_OFFSET) {
// can't match current position, ahead, or NULL_OFFSET
return false;
}
const word_type hashKey = readWord<word_type>(data + offset);
if (hashKey != key) {
return false;
}
return true;
}
template<int BLOCK_SIZE>
inline __device__ void writeSequenceData(
uint8_t* const compData,
const uint8_t* const decompData,
const token_type token,
const offset_type offset,
const position_type decomp_idx,
position_type& comp_idx)
{
assert(token.num_matches == 0 || token.num_matches >= 4);
// -> add token
if (threadIdx.x == 0) {
compData[comp_idx]
= encodePair(token.numLiteralsForHeader(), token.numMatchesForHeader());
}
++comp_idx;
// -> add literal length
const position_type literalEncodingLength = token.lengthOfLiteralEncoding();
if (literalEncodingLength) {
writeLSIC<BLOCK_SIZE>(compData + comp_idx, token.numLiteralsOverflow());
comp_idx += literalEncodingLength;
}
// -> add literals
copyLiterals<BLOCK_SIZE>(
compData + comp_idx, decompData + decomp_idx, token.num_literals);
comp_idx += token.num_literals;
// -> add offset
if (token.num_matches > 0) {
assert(offset > 0);
if (threadIdx.x == 0) {
writeWord(compData + comp_idx, offset);
}
comp_idx += sizeof(offset);
// -> add match length
if (token.hasNumMatchesOverflow()) {
writeLSIC<BLOCK_SIZE>(compData + comp_idx, token.numMatchesOverflow());
comp_idx += token.lengthOfMatchEncoding();
}
}
}
inline __device__ int numValidThreadsToMask(
const int numValidThreads)
{
return 0xffffffff >> (32-numValidThreads);
}
inline __device__ void insertHashTableWarp(
offset_type* hashTable,
const offset_type pos,
const word_type next,
const int numValidThreads)
{
position_type hashPos = hash(next);
if (threadIdx.x < numValidThreads) {
const int match = warpMatchAny(numValidThreadsToMask(numValidThreads), hashPos);
if (!match || 31 - __clz(match) == threadIdx.x) {
// I'm the last match -- can insert
hashTable[hashPos] = pos & MAX_OFFSET;
}
}
__syncwarp();
}
__device__ void compressStream(
uint8_t* compData,
const uint8_t* decompData,
offset_type* const hashTable,
const size_t length,
size_t* comp_length)
{
assert(blockDim.x == COMP_THREADS);
static_assert(COMP_THREADS <= 32, "Compression can be done with at "
"most one warp");
position_type decomp_idx = 0;
position_type comp_idx = 0;
for (position_type i = threadIdx.x; i < HASH_TABLE_SIZE; i += COMP_THREADS) {
hashTable[i] = NULL_OFFSET;
}
__syncwarp();
while (decomp_idx < length) {
const position_type tokenStart = decomp_idx;
while (true) {
if (decomp_idx + 5 + 4 >= length) {
// jump to end
decomp_idx = length;
// no match -- literals to the end
token_type tok;
tok.num_literals = length - tokenStart;
tok.num_matches = 0;
writeSequenceData<COMP_THREADS>(compData, decompData, tok, 0, tokenStart, comp_idx);
break;
}
// begin adding tokens to the hash table until we find a match
uint8_t byte = 0;
if (decomp_idx + 5 + threadIdx.x < length) {
byte = decompData[decomp_idx + threadIdx.x];
}
// each thread needs a four byte word, but only separated by a byte e.g.:
// for two threads, the five bytes [ 0x12 0x34 0x56 0x78 0x9a ] would
// be assigned as [0x78563412 0x9a785634 ] to the two threads
// (little-endian). That means when reading 32 bytes, we can only fill
// the first 29 thread's 4-byte words.
word_type next = byte;
// collect second byte
next |= __shfl_down_sync(0xffffffff, byte, 1) << 8;
// collect third and fourth bytes
next |= __shfl_down_sync(0xffffffff, next, 2) << 16;
// since we do not have valid data for the last 3 threads (or more if
// we're at the end of the data), mark them as inactive.
const int numValidThreads
= min(static_cast<size_t>(COMP_THREADS - 3), length - decomp_idx - 9);
// first try to find a local match
position_type match_location = length;
int match_mask_self = 0;
if (threadIdx.x < numValidThreads) {
match_mask_self = warpMatchAny(numValidThreadsToMask(numValidThreads), next);
}
// each thread has a mask of other threads with matches, next we need
// to find the first thread with a match before it
const int match_mask_warp = warpBallot(
match_mask_self && __clz(__brev(match_mask_self)) != threadIdx.x);
int first_match_thread;
if (match_mask_warp) {
// find the byte offset (thread id) within the warp where the first
// match is located
first_match_thread = __clz(__brev(match_mask_warp));
// determine the global position for the finding thread
match_location = __clz(__brev(match_mask_self)) + decomp_idx;
// communicate the global position of the match to other threads
match_location = __shfl_sync(0xffffffff, match_location, first_match_thread);
} else {
first_match_thread = COMP_THREADS;
}
// only go to the hash table if there is a possibility of finding an
// earlier match
if (first_match_thread > 0) {
// go to hash table for an earlier match
position_type hashPos = hash(next);
const int match_found = threadIdx.x < numValidThreads
? isValidHash(
decompData,
hashTable,
next,
hashPos,
decomp_idx + threadIdx.x)
: 0;
// determine the first thread to find a match
const int match = warpBallot(match_found);
const int candidate_first_match_thread = __clz(__brev(match));
assert(candidate_first_match_thread != threadIdx.x || match_found);
assert(!match_found || candidate_first_match_thread <= threadIdx.x);
if (candidate_first_match_thread < first_match_thread) {
// if we found a valid match, and it occurs before a previously found
// match, use that
first_match_thread = candidate_first_match_thread;
hashPos = __shfl_sync(0xffffffff, hashPos, first_match_thread);
match_location
= convertIdx(hashTable[hashPos], decomp_idx + first_match_thread);
}
}
if (match_location != length) {
// insert up to the match into the hash table
insertHashTableWarp(
hashTable, decomp_idx + threadIdx.x, next, first_match_thread);
const position_type pos = decomp_idx + first_match_thread;
assert(match_location < pos);
assert(pos - match_location <= MAX_OFFSET);
// we found a match
const offset_type match_offset = pos - match_location;
assert(match_offset > 0);
assert(match_offset <= pos);
const position_type num_literals = pos - tokenStart;
// compute match length
const position_type num_matches
= lengthOfMatch(decompData, match_location, pos, length);
// -> write our token and literal length
token_type tok;
tok.num_literals = num_literals;
tok.num_matches = num_matches;
// update our position
decomp_idx = tokenStart + num_matches + num_literals;
// insert only the literals into the hash table
writeSequenceData<COMP_THREADS>(
compData, decompData, tok, match_offset, tokenStart, comp_idx);
break;
}
// insert everything into hash table
insertHashTableWarp(
hashTable, decomp_idx + threadIdx.x, next, numValidThreads);
decomp_idx += numValidThreads;
}
}
if (threadIdx.x == 0) {
*comp_length = comp_idx;
}
}
inline __device__ void decompressStream(
uint8_t* buffer,
uint8_t* decompData,
const uint8_t* compData,
position_type length)
{
position_type comp_end = length;
BufferControl ctrl(buffer, compData, comp_end);
ctrl.loadAt(0);
position_type decomp_idx = 0;
position_type comp_idx = 0;
while (comp_idx < comp_end) {
if (comp_idx + PREFETCH_DIST > ctrl.end()) {
ctrl.loadAt(comp_idx);
}
// read header byte
token_type tok = decodePair(*ctrl.rawAt(comp_idx));
++comp_idx;
// read the length of the literals
position_type num_literals = tok.num_literals;
if (tok.num_literals == 15) {
num_literals += ctrl.readLSIC(comp_idx);
}
const position_type literalStart = comp_idx;
// copy the literals to the out stream
if (num_literals + comp_idx > ctrl.end()) {
coopCopyNoOverlap(
decompData + decomp_idx, compData + comp_idx, num_literals);
} else {
// our buffer can copy
coopCopyNoOverlap(
decompData + decomp_idx, ctrl.rawAt(comp_idx), num_literals);
}
comp_idx += num_literals;
decomp_idx += num_literals;
// Note that the last sequence stops right after literals field.
// There are specific parsing rules to respect to be compatible with the
// reference decoder : 1) The last 5 bytes are always literals 2) The last
// match cannot start within the last 12 bytes Consequently, a file with
// less than 13 bytes can only be represented as literals. These rules are in
// place to benefit speed and ensure buffer limits are never crossed.
if (comp_idx < comp_end) {
// read the offset
offset_type offset;
if (comp_idx + sizeof(offset_type) > ctrl.end()) {
offset = readWord<offset_type>(compData + comp_idx);
} else {
offset = readWord<offset_type>(ctrl.rawAt(comp_idx));
}
comp_idx += sizeof(offset_type);
// read the match length
position_type match = 4 + tok.num_matches;
if (tok.num_matches == 15) {
match += ctrl.readLSIC(comp_idx);
}
// copy match
if (offset <= num_literals
&& (ctrl.begin() <= literalStart
&& ctrl.end() >= literalStart + num_literals)) {
// we are using literals already present in our buffer
coopCopyOverlap(
decompData + decomp_idx,
ctrl.rawAt(literalStart + (num_literals - offset)),
offset,
match);
// we need to sync after we copy since we use the buffer
syncCTA();
} else {
// we need to sync before we copy since we use decomp
syncCTA();
coopCopyOverlap(
decompData + decomp_idx,
decompData + decomp_idx - offset,
offset,
match);
}
decomp_idx += match;
}
}
assert(comp_idx == comp_end);
}
template <typename T>
struct BlockPrefixCallbackOp
{
T m_running_total;
__device__ BlockPrefixCallbackOp(const T running_total) :
m_running_total(running_total)
{
}
__device__ T operator()(const T block_aggregate)
{
const T old_prefix = m_running_total;
m_running_total += block_aggregate;
return old_prefix;
}
__device__ T total() const
{
return m_running_total;
}
};
template <int BLOCK_SIZE>
inline __device__ void generateItemChunkMappings(
const size_t* const decomp_sizes,
const size_t target_chunk,
const size_t batch_size,
const int max_chunk_size,
item_type& item,
size_t& local_chunk)
{
using BlockScan = typename cub::BlockScan<size_t, BLOCK_SIZE>;
// each thread is assigned a chunk, and they cooperatively prefix sum
// the items, and then write out their chunk's item. The first thread
// assigned to any given item also writes out its prefix
__shared__ typename BlockScan::TempStorage temp_space;
__shared__ size_t local_prefix[BLOCK_SIZE + 1];
BlockPrefixCallbackOp<size_t> prefix_op(0);
item = static_cast<item_type>(-1);
// we have one thread per item computing the prefix sum
for (size_t item_start = 0; item_start < batch_size;
item_start += BLOCK_SIZE) {
const size_t i = item_start + threadIdx.x;
const size_t item_chunks
= i < batch_size ? roundUpDiv(decomp_sizes[i], max_chunk_size) : 0;
BlockScan(temp_space)
.ExclusiveSum(item_chunks, local_prefix[threadIdx.x], prefix_op);
if (threadIdx.x == 0) {
local_prefix[BLOCK_SIZE] = prefix_op.total();
}
__syncthreads();
// if a thread's chunk lies in this set of items
if (target_chunk >= local_prefix[0]
&& target_chunk < local_prefix[BLOCK_SIZE]) {
int beg = item_start;
int end = min(item_start + BLOCK_SIZE, batch_size) - 1;
// Binary search for the item containing the target chunk -- we know it
// exists, so we don't have to handle the cases of falling before or after
// the sequence. We find the item whose prefix range contains the target chunk.
item = (end + beg) / 2;
while (beg < end) {
const size_t chunk = local_prefix[item + 1 - item_start];
if (chunk <= target_chunk) {
assert(beg != chunk);
// the current mid-point works as a lower bound
beg = item + 1;
} else {
// the current mid-point does not work as a lower bound, so it must
// work as an upper bound
end = item;
}
item = (end + beg) / 2;
}
// the target for this thread is here
local_chunk = target_chunk - local_prefix[item - item_start];
}
__syncthreads();
}
}
template <int BLOCK_SIZE>
__global__ void lz4CompressGenerateHeaders(
const uint8_t* const* const decomp_data,
const size_t* const decomp_sizes,
uint8_t* const comp_data,
size_t* const* const comp_sizes,
const size_t batch_size,
const int max_chunk_size,
const size_t total_chunks,
const size_t* const scratch_space_offset,
uint8_t* const* const scratch_space,
compression_chunk_header* const headers,
size_t* const item_prefix,
item_type* const item_map)
{
const size_t target_chunk = BLOCK_SIZE * blockIdx.x + threadIdx.x;
item_type item;
size_t local_chunk;
generateItemChunkMappings<BLOCK_SIZE>(
decomp_sizes,
target_chunk,
batch_size,
max_chunk_size,
item,
local_chunk);
// write out items and chunk id's
if (target_chunk < total_chunks) {
if (local_chunk == 0) {
item_prefix[item] = target_chunk;
}
assert(item < batch_size);
item_map[target_chunk] = item;
const size_t chunk_offset
= local_chunk * static_cast<size_t>(max_chunk_size);
const size_t chunk_end = chunk_offset + max_chunk_size;
const size_t comp_offset = maxSizeOfStream(max_chunk_size) * target_chunk;
compression_chunk_header h;
h.src = decomp_data[item] + chunk_offset;
h.dst = comp_data + comp_offset;
h.hash = reinterpret_cast<offset_type*>(
scratch_space[item] + scratch_space_offset[item]
+ (HASH_TABLE_SIZE * sizeof(offset_type)) * local_chunk);
h.comp_size = comp_sizes[item] + local_chunk;
h.size = min(chunk_end, decomp_sizes[item]) - chunk_offset;
headers[target_chunk] = h;
}
}
__global__ void
lz4CompressMultistreamKernel(const compression_chunk_header* const headers)
{
const int bidx = blockIdx.x * blockDim.y + threadIdx.y;
const uint8_t* decomp_ptr = headers[bidx].src;
const size_t decomp_length = headers[bidx].size;
uint8_t* comp_ptr = headers[bidx].dst;
size_t* const comp_length = headers[bidx].comp_size;
compressStream(
comp_ptr, decomp_ptr, headers[bidx].hash, decomp_length, comp_length);
}
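// One thread block per batch item: computes the exclusive prefix sum of that
// item's per-chunk compressed sizes in place (seeded with offsets[item]) and
// writes the running total into the extra slot sizes[item][num].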
template <int BLOCK_SIZE>
__global__ void lz4CompressSumSizes(
size_t* const* const sizes,
const size_t* const offsets,
const size_t* const decomp_sizes,
const size_t chunk_size)
{
using BlockScan = typename cub::BlockScan<size_t, BLOCK_SIZE>;
__shared__ typename BlockScan::TempStorage temp_space;
BlockPrefixCallbackOp<size_t> prefix_op(offsets[blockIdx.x]);
const size_t num = roundUpDiv(decomp_sizes[blockIdx.x], chunk_size);
size_t size = 0;
for (size_t i = 0; i < num; i += BLOCK_SIZE) {
const size_t index = i + threadIdx.x;
if (index < num) {
size = sizes[blockIdx.x][index];
} else {
size = 0;
}
BlockScan(temp_space).ExclusiveSum(size, size, prefix_op);
if (index < num) {
sizes[blockIdx.x][index] = size;
}
__syncthreads();
}
if (threadIdx.x == 0) {
sizes[blockIdx.x][num] = prefix_op.total();
}
}
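// One thread block per global chunk: looks up the owning item and local chunk,
// then copies the chunk's compressed bytes from the strided staging buffer into
// the item's contiguous output at the prefix-summed offset.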
template <int BLOCK_SIZE>
__global__ void copyToContig(
const item_type* const item_map,
const size_t* const item_prefix,
const uint8_t* const temp_data,
const int stride,
const size_t* const* const comp_prefix,
uint8_t* const* const comp_data)
{
const size_t global_chunk = blockIdx.x;
const size_t item = item_map[global_chunk];
// we assume there are no empty items
assert(item <= global_chunk);
const size_t local_chunk = global_chunk - item_prefix[item];
const size_t offset = comp_prefix[item][local_chunk];
const size_t size = comp_prefix[item][local_chunk + 1] - offset;
for (size_t i = threadIdx.x; i < size; i += BLOCK_SIZE) {
comp_data[item][offset + i] = temp_data[stride * global_chunk + i];
}
}
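// Each thread block decompresses Y_DIM chunks (one per threadIdx.y slice),
// giving every chunk its own BUFFER_SIZE window of the shared-memory buffer.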
__global__ void lz4DecompressMultistreamKernel(
const chunk_header* const headers, const int num_chunks)
{
const int bid = blockIdx.x * Y_DIM + threadIdx.y;
__shared__ uint8_t buffer[BUFFER_SIZE * Y_DIM];
if (bid < num_chunks) {
uint8_t* const decomp_ptr = headers[bid].dst;
const uint8_t* const comp_ptr = headers[bid].src;
const size_t chunk_length = headers[bid].size;
decompressStream(
buffer + threadIdx.y * BUFFER_SIZE, decomp_ptr, comp_ptr, chunk_length);
}
}
__global__ void lz4DecompressGenerateHeaders(
uint8_t* const decomp_data,
const uint8_t* const comp_data,
const size_t* const comp_chunk_prefix,
const size_t decomp_chunk_size,
const size_t num_chunks,
chunk_header* const headers)
{
const int chunk = threadIdx.x + blockIdx.x * blockDim.x;
if (chunk < num_chunks) {
const size_t comp_chunk_offset = comp_chunk_prefix[chunk];
const size_t decomp_chunk_offset = chunk * decomp_chunk_size;
chunk_header h;
h.src = comp_data + comp_chunk_offset;
h.dst = decomp_data + decomp_chunk_offset;
h.size = comp_chunk_prefix[chunk + 1] - comp_chunk_prefix[chunk];
headers[chunk] = h;
}
}
/******************************************************************************
* PUBLIC FUNCTIONS ***********************************************************
*****************************************************************************/
void lz4CompressBatch(
const uint8_t* const* const decomp_data_device,
const size_t* const decomp_prefixes_device,
const size_t* const decomp_sizes_host,
const size_t batch_size,
const size_t max_chunk_size,
uint8_t* const temp_data_device,
const size_t temp_bytes,
uint8_t* const* const comp_data_device,
size_t* const* const comp_prefixes_device,
const size_t* const comp_prefix_offset_device,
cudaStream_t stream)
{
if (max_chunk_size < lz4MinChunkSize()) {
throw std::runtime_error(
"Minimum chunk size for LZ4 is " + std::to_string(MIN_CHUNK_SIZE));
} else if (max_chunk_size > lz4MaxChunkSize()) {
throw std::runtime_error(
"Maximum chunk size for LZ4 is " + std::to_string(MAX_CHUNK_SIZE));
}
// most of the kernels take a negligible amount of time, so by default we
// just use 128 threads. This value, however, is chosen arbitrarily and
// has not been tuned for any architecture or dataset size.
constexpr const int BLOCK_SIZE = 128;
const size_t stride = lz4ComputeMaxSize(max_chunk_size);
const size_t chunks_in_batch
= lz4ComputeChunksInBatch(decomp_sizes_host, batch_size, max_chunk_size);
TempSpaceBroker broker(temp_data_device, temp_bytes);
uint8_t* staging_space;
broker.reserve(&staging_space, chunks_in_batch * stride);
compression_chunk_header* headers;
broker.reserve(&headers, chunks_in_batch);
// look up starting chunk per item
size_t* item_prefix;
broker.reserve(&item_prefix, batch_size);
// look up item per chunk
item_type* item_map;
broker.reserve(&item_map, chunks_in_batch);
// setup headers
{
const dim3 grid(roundUpDiv(chunks_in_batch, BLOCK_SIZE));
const dim3 block(BLOCK_SIZE);
lz4CompressGenerateHeaders<BLOCK_SIZE><<<grid, block, 0, stream>>>(
decomp_data_device,
decomp_prefixes_device,
staging_space,
comp_prefixes_device,
batch_size,
max_chunk_size,
chunks_in_batch,
comp_prefix_offset_device,
comp_data_device,
headers,
item_prefix,
item_map);
CudaUtils::check_last_error();
}
// perform compression
{
const dim3 grid(chunks_in_batch);
const dim3 block(COMP_THREADS);
lz4CompressMultistreamKernel<<<grid, block, 0, stream>>>(headers);
CudaUtils::check_last_error();
}
// perform prefix sum
{
const dim3 grid(batch_size);
const dim3 block(BLOCK_SIZE);
lz4CompressSumSizes<BLOCK_SIZE><<<grid, block, 0, stream>>>(
comp_prefixes_device,
comp_prefix_offset_device,
decomp_prefixes_device,
max_chunk_size);
CudaUtils::check_last_error();
}
{
const dim3 grid(chunks_in_batch);
// Since we are copying a whole chunk per thread block, maximize the number
// of threads we have copying each block
const dim3 block(1024);
// Copy prefix sums values to metadata header and copy compressed data into
// contiguous space
copyToContig<1024><<<grid, block, 0, stream>>>(
item_map,
item_prefix,
staging_space,
stride,
comp_prefixes_device,
comp_data_device);
CudaUtils::check_last_error();
}
}
void lz4DecompressBatches(
void* const temp_space,
const size_t temp_size,
void* const* decompData,
const uint8_t* const* compData,
int batch_size,
const size_t** compPrefix,
int chunk_size,
int* chunks_in_item,
cudaStream_t stream)
{
TempSpaceBroker broker(temp_space, temp_size);
int total_chunks=0;
for(int i=0; i<batch_size; i++) {
total_chunks += chunks_in_item[i];
}
chunk_header* headers;
broker.reserve(&headers, total_chunks);
int chunk_start=0;
for(int i=0; i<batch_size; i++) {
const dim3 header_block(128);
const dim3 header_grid(roundUpDiv(chunks_in_item[i], header_block.x));
lz4DecompressGenerateHeaders<<<header_grid, header_block, 0, stream>>>(
static_cast<uint8_t*>(decompData[i]),
compData[i],
compPrefix[i],
chunk_size,
chunks_in_item[i],
&headers[chunk_start]);
chunk_start += chunks_in_item[i];
}
lz4DecompressMultistreamKernel<<<
roundUpDiv(total_chunks, Y_DIM),
dim3(DECOMP_THREADS, Y_DIM, 1),
0,
stream>>>(headers, total_chunks);
}
size_t lz4ComputeChunksInBatch(
const size_t* const decomp_data_size,
const size_t batch_size,
const size_t chunk_size)
{
size_t num_chunks = 0;
for (size_t i = 0; i < batch_size; ++i) {
num_chunks += roundUpDiv(decomp_data_size[i], chunk_size);
}
return num_chunks;
}
size_t lz4CompressComputeTempSize(
const size_t maxChunksInBatch, const size_t chunkSize)
{
const size_t batch_size = 1;
size_t prefix_temp_size;
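// First cub call with a null temp pointer only queries the required amount of
// temporary storage (standard two-phase CUB API); no scan is performed here.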
cudaError_t err = cub::DeviceScan::InclusiveSum(
NULL,
prefix_temp_size,
static_cast<const size_t*>(nullptr),
static_cast<size_t*>(nullptr),
maxChunksInBatch + 1);
if (err != cudaSuccess) {
throw std::runtime_error(
"Failed to get space for cub inclusive sub: " + std::to_string(err));
}
const size_t strideSize = lz4ComputeMaxSize(chunkSize);
const size_t staging_size
= roundUpTo(strideSize * maxChunksInBatch, sizeof(size_t));
const size_t prefix_out_size = sizeof(size_t) * (maxChunksInBatch + 1);
const size_t header_size = roundUpTo(
sizeof(compression_chunk_header) * maxChunksInBatch, sizeof(size_t));
const size_t map_size
= roundUpTo(sizeof(uint32_t) * maxChunksInBatch, sizeof(size_t));
const size_t prefix_size = sizeof(size_t) * batch_size;
return prefix_temp_size + prefix_out_size + staging_size + header_size
+ map_size + prefix_size;
}
size_t lz4DecompressComputeTempSize(
const size_t maxChunksInBatch, const size_t /* chunkSize */)
{
const size_t header_size = sizeof(chunk_header) * maxChunksInBatch;
return roundUpTo(header_size, sizeof(size_t));
}
size_t lz4ComputeMaxSize(const size_t size)
{
return maxSizeOfStream(size);
}
size_t lz4MinChunkSize()
{
return MIN_CHUNK_SIZE;
}
size_t lz4MaxChunkSize()
{
return MAX_CHUNK_SIZE;
}
} // nvcomp namespace
|
d54513265b16ef25638c8ec3344f8e2864d1c64a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <chrono>
#include "array2d.h"
#include "cuda_helper.h"
#include "mat_bench.h"
#define value_t double
#define index_t int
__global__ void kernel(index_t Nx, index_t Ny, value_t a, value_t *x)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int gid = i * Ny + j;
if (i < Nx && j < Ny)
x[gid] *= a;
}
struct mat_copy : public mat_bench<value_t, index_t>
{
void benchmark()
{
print_bench();
std::cout << "\nSimulation info: 2d mat scale\n";
value_t **x = create_array2d<value_t, index_t>(side_size, side_size);
#pragma omp parallel for
for (index_t i = 0; i < side_size; i++)
{
for (index_t j = 0; j < side_size; j++)
{
x[i][j] = 1.0;
}
}
value_t *d_x;
value_t *h_x = x[0];
checkCudaErrors(hipMalloc(&d_x, total_size * sizeof(value_t)));
hipEvent_t start, stop;
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
checkCudaErrors(hipMemcpy(d_x, h_x, total_size * sizeof(value_t), hipMemcpyHostToDevice));
dim3 blockd3 = dim3(block0, block1, 1);
dim3 grid = calc_grid2d(blockd3, side_size, side_size);
std::cout << " Block: " << blockd3.x << "(x) X " << blockd3.y << "(y)\n"
<< " Grid size: " << grid.x << "(x) X " << grid.y << "(y)\n\n";
loops = 0;
auto startcpu = std::chrono::high_resolution_clock::now();
checkCudaErrors(hipEventRecord(start));
while ((std::chrono::duration_cast<std::chrono::milliseconds>(
std::chrono::high_resolution_clock::now() - startcpu)
.count()) < 1000.0 * benchtime)
{
hipLaunchKernelGGL(( kernel), dim3(grid), dim3(blockd3), 0, 0, side_size, side_size, 0.5, d_x);
hipLaunchKernelGGL(( kernel), dim3(grid), dim3(blockd3), 0, 0, side_size, side_size, 2.0, d_x);
checkCudaErrorsAfterKernels;
loops++;
}
checkCudaErrors(hipEventRecord(stop));
checkCudaErrors(hipEventSynchronize(stop));
float du = 0;
checkCudaErrors(hipEventElapsedTime(&du, start, stop));
duration = 1.0e-3 * du;
checkCudaErrors(hipMemcpy(h_x, d_x, total_size * sizeof(value_t), hipMemcpyDeviceToHost));
test_result(x, value_t(total_size));
print_performance();
delete[] x;
checkCudaErrors(hipFree(d_x));
}
mat_copy(int narg, char **arg) : mat_bench<value_t, index_t>(narg, arg)
{
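// Per loop: two kernel launches, each reading and writing every element once,
// i.e. 2 * 2 * total_size * sizeof(value_t) bytes, expressed in GiB.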
memory_transfer_per_loop = 2.0 * sizeof(value_t) * 2.0 * double(total_size) /
(1024.0 * 1024.0 * 1024.0);
}
};
int main(int narg, char **arg)
{
check_cuda_device();
mat_copy test(narg, arg);
test.benchmark();
} | d54513265b16ef25638c8ec3344f8e2864d1c64a.cu | #include <chrono>
#include "array2d.h"
#include "cuda_helper.h"
#include "mat_bench.h"
#define value_t double
#define index_t int
__global__ void kernel(index_t Nx, index_t Ny, value_t a, value_t *x)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int gid = i * Ny + j;
if (i < Nx && j < Ny)
x[gid] *= a;
}
struct mat_copy : public mat_bench<value_t, index_t>
{
void benchmark()
{
print_bench();
std::cout << "\nSimulation info: 2d mat scale\n";
value_t **x = create_array2d<value_t, index_t>(side_size, side_size);
#pragma omp parallel for
for (index_t i = 0; i < side_size; i++)
{
for (index_t j = 0; j < side_size; j++)
{
x[i][j] = 1.0;
}
}
value_t *d_x;
value_t *h_x = x[0];
checkCudaErrors(cudaMalloc(&d_x, total_size * sizeof(value_t)));
cudaEvent_t start, stop;
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
checkCudaErrors(cudaMemcpy(d_x, h_x, total_size * sizeof(value_t), cudaMemcpyHostToDevice));
dim3 blockd3 = dim3(block0, block1, 1);
dim3 grid = calc_grid2d(blockd3, side_size, side_size);
std::cout << " Block: " << blockd3.x << "(x) X " << blockd3.y << "(y)\n"
<< " Grid size: " << grid.x << "(x) X " << grid.y << "(y)\n\n";
loops = 0;
auto startcpu = std::chrono::high_resolution_clock::now();
checkCudaErrors(cudaEventRecord(start));
while ((std::chrono::duration_cast<std::chrono::milliseconds>(
std::chrono::high_resolution_clock::now() - startcpu)
.count()) < 1000.0 * benchtime)
{
kernel<<<grid, blockd3>>>(side_size, side_size, 0.5, d_x);
kernel<<<grid, blockd3>>>(side_size, side_size, 2.0, d_x);
checkCudaErrorsAfterKernels;
loops++;
}
checkCudaErrors(cudaEventRecord(stop));
checkCudaErrors(cudaEventSynchronize(stop));
float du = 0;
checkCudaErrors(cudaEventElapsedTime(&du, start, stop));
duration = 1.0e-3 * du;
checkCudaErrors(cudaMemcpy(h_x, d_x, total_size * sizeof(value_t), cudaMemcpyDeviceToHost));
test_result(x, value_t(total_size));
print_performance();
delete[] x;
checkCudaErrors(cudaFree(d_x));
}
mat_copy(int narg, char **arg) : mat_bench<value_t, index_t>(narg, arg)
{
memory_transfer_per_loop = 2.0 * sizeof(value_t) * 2.0 * double(total_size) /
(1024.0 * 1024.0 * 1024.0);
}
};
int main(int narg, char **arg)
{
check_cuda_device();
mat_copy test(narg, arg);
test.benchmark();
} |
84ffd920c36451b7c433c2a02b1c35a8fb6f47eb.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <hip/hip_runtime.h>
int main()
{
hipError_t err;
double *v;
int c;
hipGetDeviceCount(&c);
std::cout << c << std::endl;
err = hipMalloc(&v, 100*sizeof(double));
std::cout << hipGetErrorString(err) << std::endl;
err = hipFree(v);
std::cout << hipGetErrorString(err) << std::endl;
}
| 84ffd920c36451b7c433c2a02b1c35a8fb6f47eb.cu | #include <iostream>
#include <cuda.h>
int main()
{
cudaError_t err;
double *v;
int c;
cudaGetDeviceCount(&c);
std::cout << c << std::endl;
err = cudaMalloc(&v, 100*sizeof(double));
std::cout << cudaGetErrorString(err) << std::endl;
err = cudaFree(v);
std::cout << cudaGetErrorString(err) << std::endl;
}
|
4e2cbc6899be24ba7ce2c893c2c0f91d65936c0c.hip | // !!! This is a file automatically generated by hipify!!!
// Version 0.5.0 CUDA-C: Malbec
// Dr. Gonzalo Damián Quiroga
// Universidad Industrial de Santander
#include <hip/hip_runtime.h>
#include <stdio.h>
#include "common/handle.h"
#include "common/menu.h"
#include "kernel/kernel.h"
#include "kernel/kernel.cu"
#include "common/menu.c"
int main(int argc,char **argv)
{
//Setting the Spacetime Parameters and Initial conditions
double Rot;
menu(&Rot);
int n_ic;
printf("Set the number of initial conditions: ");
scanf("%d", &n_ic);
//Solver settings
double h, final_time;
double evol_time = 0.0;
//Integration steps
printf("Final Time: ");
scanf("%lf", &final_time);
printf("Initial step size: ");
scanf("%lf", &h);
// Host input/output vectors
double *h_x0, *h_x1, *h_x2, *h_x3,*h_px0, *h_px1,*h_px2, *h_px3;
// Device input/output vectors
double *d_x0, *d_x1, *d_x2, *d_x3,*d_px0, *d_px1,*d_px2, *d_px3;
// Size, in bytes, of each vector
double nBytes = n_ic*sizeof(double);
// Allocate memory for each vector on host
h_x0 = (double *)malloc(nBytes);
h_x1= (double *)malloc(nBytes);
h_x2 = (double *)malloc(nBytes);
h_x3 = (double *)malloc(nBytes);
h_px0= (double *)malloc(nBytes);
h_px1 = (double *)malloc(nBytes);
h_px2= (double *)malloc(nBytes);
h_px3 = (double *)malloc(nBytes);
// Allocate memory for each vector on GPU
printf("Allocating device memory on host..\n");
HANDLE_ERROR(hipMalloc((void **)&d_x0,nBytes));
HANDLE_ERROR(hipMalloc((void **)&d_x1,nBytes));
HANDLE_ERROR(hipMalloc((void **)&d_x2,nBytes));
HANDLE_ERROR(hipMalloc((void **)&d_x3,nBytes));
HANDLE_ERROR(hipMalloc((void **)&d_px0,nBytes));
HANDLE_ERROR(hipMalloc((void **)&d_px1,nBytes));
HANDLE_ERROR(hipMalloc((void **)&d_px2,nBytes));
HANDLE_ERROR(hipMalloc((void **)&d_px3,nBytes));
// Initial conditions on host
FILE *fic = fopen("ic/ic.txt", "r");
if (fic == NULL)
{
perror("Error: can't open ic.txt.");
return -1;
}
//Read the initial conditions
int count;
for (count = 0; count < n_ic; count++)
{
fscanf(fic, "%lf %lf %lf %lf %lf %lf %lf %lf", &h_x0[count], &h_x1[count], &h_x2[count], &h_x3[count], &h_px0[count], &h_px1[count], &h_px2[count], &h_px3[count]);
}
//Close the file
fclose(fic);
//Set the Block and grid Size
int blockSize, gridSize,minGridSize;
// Number of threads in each thread block.
//Suggested block size to achieve maximum occupancy.
hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, rk4, 0, n_ic);
gridSize = n_ic/blockSize;
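// Note: integer division -- this assumes n_ic is a multiple of blockSize;
// otherwise the trailing initial conditions would not be covered by the grid.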
dim3 dimBlock(blockSize,1,1);
dim3 dimGrid(gridSize,1,1);
// Copy host vectors to device
printf("Copying to device..\n");
HANDLE_ERROR(hipMemcpy(d_x0,h_x0,nBytes,hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(d_x1,h_x1,nBytes,hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(d_x2,h_x2,nBytes,hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(d_x3,h_x3,nBytes,hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(d_px0,h_px0,nBytes,hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(d_px1,h_px1,nBytes,hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(d_px2,h_px2,nBytes,hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(d_px3,h_px3,nBytes,hipMemcpyHostToDevice));
//Evol: Time Start
clock_t start_d=clock();
//System Evolution
printf("Executing Kernel.. \n");
printf("Output file created.. \n");
//Open fout
FILE *fout = fopen("output/output.txt", "w");
if (fout == NULL)
{
perror("Error: the output folder does not exist.");
return -1;
}
//Print the initial conditions in the output file
int i;
for (i = 0; i < n_ic; i++)
{
fprintf(fout, "%.8lf %.16lf %.16lf %.16lf %.16lf \n", evol_time, h_x0[i], sqrt(h_x1[i]*h_x1[i]+Rot*Rot)*sin(h_x2[i])*cos(h_x3[i]),
sqrt(h_x1[i]*h_x1[i]+Rot*Rot)*sin(h_x2[i])*sin(h_x3[i]), h_x1[i]*cos(h_x2[i]));
}
evol_time=h+evol_time;
printf("Evolving the systems.. \n");
do
{
// Executing kernel
hipLaunchKernelGGL(( rk4), dim3(gridSize),dim3(blockSize), 0, 0, Rot, d_x0,d_x1,d_x2,d_x3,d_px0,d_px1,d_px2,d_px3, n_ic, h);
hipDeviceSynchronize();
// Copy array back to host
HANDLE_ERROR(hipMemcpy(h_x0,d_x0,nBytes,hipMemcpyDeviceToHost));
HANDLE_ERROR(hipMemcpy(h_x1,d_x1,nBytes,hipMemcpyDeviceToHost));
HANDLE_ERROR(hipMemcpy(h_x2,d_x2,nBytes,hipMemcpyDeviceToHost));
HANDLE_ERROR(hipMemcpy(h_x3,d_x3,nBytes,hipMemcpyDeviceToHost));
HANDLE_ERROR(hipMemcpy(h_px0,d_px0,nBytes,hipMemcpyDeviceToHost));
HANDLE_ERROR(hipMemcpy(h_px1,d_px1,nBytes,hipMemcpyDeviceToHost));
HANDLE_ERROR(hipMemcpy(h_px2,d_px2,nBytes,hipMemcpyDeviceToHost));
HANDLE_ERROR(hipMemcpy(h_px3,d_px3,nBytes,hipMemcpyDeviceToHost));
//Print in file the initial conditions in Cartesian Coordinates
for (i = 0; i < n_ic; i++)
{
fprintf(fout, "%.8lf %.16lf %.16lf %.16lf %.16lf \n", evol_time, h_x0[i], sqrt(h_x1[i]*h_x1[i]+Rot*Rot)*sin(h_x2[i])*cos(h_x3[i]),
sqrt(h_x1[i]*h_x1[i]+Rot*Rot)*sin(h_x2[i])*sin(h_x3[i]), h_x1[i]*cos(h_x2[i]));
}
evol_time=h+evol_time;
}while(evol_time <= final_time);
//Evol: Time Ends
clock_t end_d = clock();
double time_spent = (double)(end_d-start_d)/CLOCKS_PER_SEC;
//Close the output file
fclose(fout);
// Release device memory
hipFree(d_x0);
hipFree(d_x1);
hipFree(d_x2);
hipFree(d_x3);
hipFree(d_px0);
hipFree(d_px1);
hipFree(d_px2);
hipFree(d_px3);
// Release host memory
free(h_x0);
free(h_x1);
free(h_x2);
free(h_x3);
free(h_px0);
free(h_px1);
free(h_px2);
free(h_px3);
//Log file.
printf("Printing info.log file.. \n");
FILE *flog = fopen("info.log", "w");
fprintf(flog, "Mass: 1, Rot: %lf \n", Rot);
fprintf(flog, "Initial step size: %lf, Final time: %lf, Initial Conditions: %d \n", h, final_time, n_ic);
fprintf(flog, "Integrator: RK4 Fixed Step. Number of Thread per block: %d \n", blockSize);
fprintf(flog, "Runtime: %f sec.", time_spent);
fclose(flog);
printf("Runtime: %f sec \n", time_spent);
return 0;
}
| 4e2cbc6899be24ba7ce2c893c2c0f91d65936c0c.cu | // Version 0.5.0 CUDA-C: Malbec
// Dr. Gonzalo Damián Quiroga
// Universidad Industrial de Santander
#include <cuda.h>
#include <stdio.h>
#include "common/handle.h"
#include "common/menu.h"
#include "kernel/kernel.h"
#include "kernel/kernel.cu"
#include "common/menu.c"
int main(int argc,char **argv)
{
//Setting the Spacetime Parameters and Initial conditions
double Rot;
menu(&Rot);
int n_ic;
printf("Set the number of initial conditions: ");
scanf("%d", &n_ic);
//Solver settings
double h, final_time;
double evol_time = 0.0;
//Integration steps
printf("Final Time: ");
scanf("%lf", &final_time);
printf("Initial step size: ");
scanf("%lf", &h);
// Host input/output vectors
double *h_x0, *h_x1, *h_x2, *h_x3,*h_px0, *h_px1,*h_px2, *h_px3;
// Device input/output vectors
double *d_x0, *d_x1, *d_x2, *d_x3,*d_px0, *d_px1,*d_px2, *d_px3;
// Size, in bytes, of each vector
double nBytes = n_ic*sizeof(double);
// Allocate memory for each vector on host
h_x0 = (double *)malloc(nBytes);
h_x1= (double *)malloc(nBytes);
h_x2 = (double *)malloc(nBytes);
h_x3 = (double *)malloc(nBytes);
h_px0= (double *)malloc(nBytes);
h_px1 = (double *)malloc(nBytes);
h_px2= (double *)malloc(nBytes);
h_px3 = (double *)malloc(nBytes);
// Allocate memory for each vector on GPU
printf("Allocating device memory on host..\n");
HANDLE_ERROR(cudaMalloc((void **)&d_x0,nBytes));
HANDLE_ERROR(cudaMalloc((void **)&d_x1,nBytes));
HANDLE_ERROR(cudaMalloc((void **)&d_x2,nBytes));
HANDLE_ERROR(cudaMalloc((void **)&d_x3,nBytes));
HANDLE_ERROR(cudaMalloc((void **)&d_px0,nBytes));
HANDLE_ERROR(cudaMalloc((void **)&d_px1,nBytes));
HANDLE_ERROR(cudaMalloc((void **)&d_px2,nBytes));
HANDLE_ERROR(cudaMalloc((void **)&d_px3,nBytes));
// Initial conditions on host
FILE *fic = fopen("ic/ic.txt", "r");
if (fic == NULL)
{
perror("Error: can't open ic.txt.");
return -1;
}
//Read the initial conditions
int count;
for (count = 0; count < n_ic; count++)
{
fscanf(fic, "%lf %lf %lf %lf %lf %lf %lf %lf", &h_x0[count], &h_x1[count], &h_x2[count], &h_x3[count], &h_px0[count], &h_px1[count], &h_px2[count], &h_px3[count]);
}
//Close the file
fclose(fic);
//Set the Block and grid Size
int blockSize, gridSize,minGridSize;
// Number of threads in each thread block.
//Suggested block size to achieve maximum occupancy.
cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, rk4, 0, n_ic);
gridSize = n_ic/blockSize;
dim3 dimBlock(blockSize,1,1);
dim3 dimGrid(gridSize,1,1);
// Copy host vectors to device
printf("Copying to device..\n");
HANDLE_ERROR(cudaMemcpy(d_x0,h_x0,nBytes,cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(d_x1,h_x1,nBytes,cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(d_x2,h_x2,nBytes,cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(d_x3,h_x3,nBytes,cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(d_px0,h_px0,nBytes,cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(d_px1,h_px1,nBytes,cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(d_px2,h_px2,nBytes,cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(d_px3,h_px3,nBytes,cudaMemcpyHostToDevice));
//Evol: Time Start
clock_t start_d=clock();
//System Evolution
printf("Executing Kernel.. \n");
printf("Output file created.. \n");
//Open fout
FILE *fout = fopen("output/output.txt", "w");
if (fout == NULL)
{
perror("Error: the output folder does not exist.");
return -1;
}
//Print the initial conditions in the output file
int i;
for (i = 0; i < n_ic; i++)
{
fprintf(fout, "%.8lf %.16lf %.16lf %.16lf %.16lf \n", evol_time, h_x0[i], sqrt(h_x1[i]*h_x1[i]+Rot*Rot)*sin(h_x2[i])*cos(h_x3[i]),
sqrt(h_x1[i]*h_x1[i]+Rot*Rot)*sin(h_x2[i])*sin(h_x3[i]), h_x1[i]*cos(h_x2[i]));
}
evol_time=h+evol_time;
printf("Evolving the systems.. \n");
do
{
// Executing kernel
rk4<<<gridSize,blockSize>>>(Rot, d_x0,d_x1,d_x2,d_x3,d_px0,d_px1,d_px2,d_px3, n_ic, h);
cudaThreadSynchronize();
// Copy array back to host
HANDLE_ERROR(cudaMemcpy(h_x0,d_x0,nBytes,cudaMemcpyDeviceToHost));
HANDLE_ERROR(cudaMemcpy(h_x1,d_x1,nBytes,cudaMemcpyDeviceToHost));
HANDLE_ERROR(cudaMemcpy(h_x2,d_x2,nBytes,cudaMemcpyDeviceToHost));
HANDLE_ERROR(cudaMemcpy(h_x3,d_x3,nBytes,cudaMemcpyDeviceToHost));
HANDLE_ERROR(cudaMemcpy(h_px0,d_px0,nBytes,cudaMemcpyDeviceToHost));
HANDLE_ERROR(cudaMemcpy(h_px1,d_px1,nBytes,cudaMemcpyDeviceToHost));
HANDLE_ERROR(cudaMemcpy(h_px2,d_px2,nBytes,cudaMemcpyDeviceToHost));
HANDLE_ERROR(cudaMemcpy(h_px3,d_px3,nBytes,cudaMemcpyDeviceToHost));
//Print in file the initial conditions in Cartesian Coordinates
for (i = 0; i < n_ic; i++)
{
fprintf(fout, "%.8lf %.16lf %.16lf %.16lf %.16lf \n", evol_time, h_x0[i], sqrt(h_x1[i]*h_x1[i]+Rot*Rot)*sin(h_x2[i])*cos(h_x3[i]),
sqrt(h_x1[i]*h_x1[i]+Rot*Rot)*sin(h_x2[i])*sin(h_x3[i]), h_x1[i]*cos(h_x2[i]));
}
evol_time=h+evol_time;
}while(evol_time <= final_time);
//Evol: Time Ends
clock_t end_d = clock();
double time_spent = (double)(end_d-start_d)/CLOCKS_PER_SEC;
//Close the output file
fclose(fout);
// Release device memory
cudaFree(d_x0);
cudaFree(d_x1);
cudaFree(d_x2);
cudaFree(d_x3);
cudaFree(d_px0);
cudaFree(d_px1);
cudaFree(d_px2);
cudaFree(d_px3);
// Release host memory
free(h_x0);
free(h_x1);
free(h_x2);
free(h_x3);
free(h_px0);
free(h_px1);
free(h_px2);
free(h_px3);
//Log file.
printf("Printing info.log file.. \n");
FILE *flog = fopen("info.log", "w");
fprintf(flog, "Mass: 1, Rot: %lf \n", Rot);
fprintf(flog, "Initial step size: %lf, Final time: %lf, Initial Conditions: %d \n", h, final_time, n_ic);
fprintf(flog, "Integrator: RK4 Fixed Step. Number of Thread per block: %d \n", blockSize);
fprintf(flog, "Runtime: %f sec.", time_spent);
fclose(flog);
printf("Runtime: %f sec \n", time_spent);
return 0;
}
|
d7015b6643708b9e6a9e749015f666e9f27c45d7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cuda_iface.h"
#include "cuda_mparticles.cuh"
#include "cuda_bits.h"
#include "cuda_base.cuh"
#include "psc_bits.h"
#include "heating_spot_foil.hxx"
#include "heating_cuda_impl.hxx"
#include "balance.hxx"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <hiprand/hiprand_kernel.h>
#include <cstdio>
#define THREADS_PER_BLOCK 128
using Float3 = Vec3<float>;
// ----------------------------------------------------------------------
// bm_normal2
static inline float2 bm_normal2(void)
{
float u1, u2;
do {
u1 = random() * (1.f / RAND_MAX);
u2 = random() * (1.f / RAND_MAX);
} while (u1 <= 0.f);
float2 rv;
rv.x = sqrtf(-2.f * logf(u1)) * cosf(2.f * M_PI * u2);
rv.y = sqrtf(-2.f * logf(u1)) * sinf(2.f * M_PI * u2);
return rv;
}
// ----------------------------------------------------------------------
// k_curand_setup
__global__ static void k_curand_setup(hiprandState_t* d_curand_states)
{
int bid = (blockIdx.z * gridDim.y + blockIdx.y) * gridDim.x + blockIdx.x;
int id = threadIdx.x + bid * THREADS_PER_BLOCK;
hiprand_init(1234, id, 0, &d_curand_states[id]);
}
// ----------------------------------------------------------------------
// d_particle_kick
__device__ void d_particle_kick(DParticleCuda& prt, float H, float heating_dt,
hiprandState_t* state)
{
float2 r01 = hiprand_normal2(state);
float r2 = hiprand_normal(state);
float Dp = sqrtf(H * heating_dt);
prt.u[0] += Dp * r01.x;
prt.u[1] += Dp * r01.y;
prt.u[2] += Dp * r2;
}
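// In effect each call adds independent Gaussian noise with standard deviation
// sqrt(H * heating_dt) to the three momentum components.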
// ----------------------------------------------------------------------
// k_heating_run_foil
template <typename BS, typename HS>
__global__ static void __launch_bounds__(THREADS_PER_BLOCK, 3)
k_heating_run_foil(HS foil, DMparticlesCuda<BS> dmprts, float heating_dt,
Float3* d_xb_by_patch, hiprandState_t* d_curand_states,
int n_blocks)
{
BlockSimple2<BS, typename HS::dim> current_block;
/* Copy state to local memory for efficiency */
int bid = blockIdx.x;
int id = threadIdx.x + bid * blockDim.x;
hiprandState_t local_state = d_curand_states[id];
for (; bid < n_blocks; bid += gridDim.x) {
current_block.init(dmprts, bid);
Float3 xb; // __shared__
xb[0] = d_xb_by_patch[current_block.p][0];
xb[1] = d_xb_by_patch[current_block.p][1];
xb[2] = d_xb_by_patch[current_block.p][2];
int block_begin = dmprts.off_[current_block.bid];
int block_end = dmprts.off_[current_block.bid + 1];
for (int n : in_block_loop(block_begin, block_end)) {
if (n < block_begin) {
continue;
}
auto prt = dmprts.storage[n];
float xx[3] = {
prt.x[0] + xb[0],
prt.x[1] + xb[1],
prt.x[2] + xb[2],
};
float H = foil(xx, prt.kind);
if (H > 0.f) {
d_particle_kick(prt, H, heating_dt, &local_state);
dmprts.storage.store_momentum(prt, n);
}
}
}
d_curand_states[id] = local_state;
}
// ======================================================================
// cuda_heating_foil
template <typename HS>
struct cuda_heating_foil
{
cuda_heating_foil(const Grid_t& grid, const HS& heating_spot,
double heating_dt)
: heating_dt_(heating_dt), heating_spot_{heating_spot}, first_time_{true}
{}
// no copy constructor / assign, to catch performance issues
cuda_heating_foil(const cuda_heating_foil&) = delete;
cuda_heating_foil& operator=(const cuda_heating_foil&) = delete;
void reset() { first_time_ = true; }
// ----------------------------------------------------------------------
// operator()
template <typename BS>
void operator()(cuda_mparticles<BS>* cmprts)
{
prof_barrier("heating start");
// return cuda_heating_run_foil_gold(cmprts);
if (cmprts->n_prts == 0) {
return;
}
dim3 dimGrid = BlockSimple2<BS, typename HS::dim>::dimGrid(*cmprts);
if (first_time_) { // FIXME
d_xb_by_patch_ = cmprts->xb_by_patch;
d_curand_states_.resize(dimGrid.x * dimGrid.y * dimGrid.z *
THREADS_PER_BLOCK);
static int pr;
if (!pr) {
pr = prof_register("heating_curand", 1., 0, 0);
}
prof_start(pr);
hipLaunchKernelGGL(( k_curand_setup), dim3(dimGrid), dim3(THREADS_PER_BLOCK), 0, 0,
d_curand_states_.data().get());
cuda_sync_if_enabled();
prof_stop(pr);
prof_barrier("heating first");
first_time_ = false;
}
if (cmprts->need_reorder) {
cmprts->reorder();
}
int n_blocks = cmprts->b_mx()[0] * cmprts->b_mx()[1] * cmprts->b_mx()[2] *
cmprts->n_patches();
hipLaunchKernelGGL(( k_heating_run_foil<BS>), dim3(dimGrid), dim3(THREADS_PER_BLOCK), 0, 0,
heating_spot_, *cmprts, heating_dt_, d_xb_by_patch_.data().get(),
d_curand_states_.data().get(), n_blocks);
cuda_sync_if_enabled();
}
// state (FIXME, shouldn't be part of the interface)
bool first_time_;
float heating_dt_;
HS heating_spot_;
psc::device_vector<Float3> d_xb_by_patch_;
psc::device_vector<hiprandState_t> d_curand_states_;
};
// ----------------------------------------------------------------------
// particle_kick
__host__ void particle_kick(DParticleCuda& prt, float H, float heating_dt)
{
float2 r01 = bm_normal2();
float2 r23 = bm_normal2();
float Dp = sqrtf(H * heating_dt);
prt.u[0] += Dp * r01.x;
prt.u[1] += Dp * r01.y;
prt.u[2] += Dp * r23.x;
}
// ----------------------------------------------------------------------
// cuda_heating_run_foil_gold
template <typename BS, typename HS>
void cuda_heating_run_foil_gold(HS& foil, float heating_dt,
cuda_mparticles<BS>* cmprts)
{
for (int b = 0; b < cmprts->n_blocks; b++) {
int p = b / cmprts->n_blocks_per_patch;
for (int n = cmprts->d_off[b]; n < cmprts->d_off[b + 1]; n++) {
auto prt = cmprts->storage[n];
float* xb = &cmprts->xb_by_patch[p][0];
float xx[3] = {
prt.x[0] + xb[0],
prt.x[1] + xb[1],
prt.x[2] + xb[2],
};
float H = foil(xx, prt.kind);
// float4 pxi4 = d_pxi4[n];
// printf("%s xx = %g %g %g H = %g px = %g %g %g\n", (H > 0) ? "H" : " ",
// xx[0], xx[1], xx[2], H,
// pxi4.x, pxi4.y, pxi4.z);
// pxi4.w = H;
// d_pxi4[n] = pxi4;
if (H > 0) {
auto prt = cmprts->storage[n];
particle_kick(prt, H, heating_dt);
cmprts->storage.store_momentum(prt, n);
// printf("H xx = %g %g %g H = %g px = %g %g %g\n", xx[0], xx[1], xx[2],
// H,
// pxi4.x, pxi4.y, pxi4.z);
}
}
}
}
// ======================================================================
template <typename HS, typename MP>
HeatingCuda<HS, MP>::HeatingCuda(const Grid_t& grid, int interval,
HS heating_spot)
: foil_{new cuda_heating_foil<HS>{grid, heating_spot, interval * grid.dt}},
balance_generation_cnt_{-1}
{}
template <typename HS, typename MP>
HeatingCuda<HS, MP>::~HeatingCuda()
{
delete foil_;
}
template <typename HS, typename MP>
void HeatingCuda<HS, MP>::reset(const MP& mprts)
{
foil_->reset();
}
template <typename HS, typename MP>
void HeatingCuda<HS, MP>::operator()(MP& mprts)
{
if (psc_balance_generation_cnt > this->balance_generation_cnt_) {
balance_generation_cnt_ = psc_balance_generation_cnt;
reset(mprts);
}
(*foil_)(mprts.cmprts());
}
// ======================================================================
template struct HeatingCuda<HeatingSpotFoil<dim_yz>, MparticlesCuda<BS144>>;
template struct HeatingCuda<HeatingSpotFoil<dim_xyz>, MparticlesCuda<BS444>>;
| d7015b6643708b9e6a9e749015f666e9f27c45d7.cu |
#include "cuda_iface.h"
#include "cuda_mparticles.cuh"
#include "cuda_bits.h"
#include "cuda_base.cuh"
#include "psc_bits.h"
#include "heating_spot_foil.hxx"
#include "heating_cuda_impl.hxx"
#include "balance.hxx"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <curand_kernel.h>
#include <cstdio>
#define THREADS_PER_BLOCK 128
using Float3 = Vec3<float>;
// ----------------------------------------------------------------------
// bm_normal2
static inline float2 bm_normal2(void)
{
float u1, u2;
do {
u1 = random() * (1.f / RAND_MAX);
u2 = random() * (1.f / RAND_MAX);
} while (u1 <= 0.f);
float2 rv;
rv.x = sqrtf(-2.f * logf(u1)) * cosf(2.f * M_PI * u2);
rv.y = sqrtf(-2.f * logf(u1)) * sinf(2.f * M_PI * u2);
return rv;
}
// ----------------------------------------------------------------------
// k_curand_setup
__global__ static void k_curand_setup(curandState* d_curand_states)
{
int bid = (blockIdx.z * gridDim.y + blockIdx.y) * gridDim.x + blockIdx.x;
int id = threadIdx.x + bid * THREADS_PER_BLOCK;
curand_init(1234, id, 0, &d_curand_states[id]);
}
// ----------------------------------------------------------------------
// d_particle_kick
__device__ void d_particle_kick(DParticleCuda& prt, float H, float heating_dt,
curandState* state)
{
float2 r01 = curand_normal2(state);
float r2 = curand_normal(state);
float Dp = sqrtf(H * heating_dt);
prt.u[0] += Dp * r01.x;
prt.u[1] += Dp * r01.y;
prt.u[2] += Dp * r2;
}
// ----------------------------------------------------------------------
// k_heating_run_foil
template <typename BS, typename HS>
__global__ static void __launch_bounds__(THREADS_PER_BLOCK, 3)
k_heating_run_foil(HS foil, DMparticlesCuda<BS> dmprts, float heating_dt,
Float3* d_xb_by_patch, curandState* d_curand_states,
int n_blocks)
{
BlockSimple2<BS, typename HS::dim> current_block;
/* Copy state to local memory for efficiency */
int bid = blockIdx.x;
int id = threadIdx.x + bid * blockDim.x;
curandState local_state = d_curand_states[id];
for (; bid < n_blocks; bid += gridDim.x) {
current_block.init(dmprts, bid);
Float3 xb; // __shared__
xb[0] = d_xb_by_patch[current_block.p][0];
xb[1] = d_xb_by_patch[current_block.p][1];
xb[2] = d_xb_by_patch[current_block.p][2];
int block_begin = dmprts.off_[current_block.bid];
int block_end = dmprts.off_[current_block.bid + 1];
for (int n : in_block_loop(block_begin, block_end)) {
if (n < block_begin) {
continue;
}
auto prt = dmprts.storage[n];
float xx[3] = {
prt.x[0] + xb[0],
prt.x[1] + xb[1],
prt.x[2] + xb[2],
};
float H = foil(xx, prt.kind);
if (H > 0.f) {
d_particle_kick(prt, H, heating_dt, &local_state);
dmprts.storage.store_momentum(prt, n);
}
}
}
d_curand_states[id] = local_state;
}
// ======================================================================
// cuda_heating_foil
template <typename HS>
struct cuda_heating_foil
{
cuda_heating_foil(const Grid_t& grid, const HS& heating_spot,
double heating_dt)
: heating_dt_(heating_dt), heating_spot_{heating_spot}, first_time_{true}
{}
// no copy constructor / assign, to catch performance issues
cuda_heating_foil(const cuda_heating_foil&) = delete;
cuda_heating_foil& operator=(const cuda_heating_foil&) = delete;
void reset() { first_time_ = true; }
// ----------------------------------------------------------------------
// operator()
template <typename BS>
void operator()(cuda_mparticles<BS>* cmprts)
{
prof_barrier("heating start");
// return cuda_heating_run_foil_gold(cmprts);
if (cmprts->n_prts == 0) {
return;
}
dim3 dimGrid = BlockSimple2<BS, typename HS::dim>::dimGrid(*cmprts);
if (first_time_) { // FIXME
d_xb_by_patch_ = cmprts->xb_by_patch;
d_curand_states_.resize(dimGrid.x * dimGrid.y * dimGrid.z *
THREADS_PER_BLOCK);
static int pr;
if (!pr) {
pr = prof_register("heating_curand", 1., 0, 0);
}
prof_start(pr);
k_curand_setup<<<dimGrid, THREADS_PER_BLOCK>>>(
d_curand_states_.data().get());
cuda_sync_if_enabled();
prof_stop(pr);
prof_barrier("heating first");
first_time_ = false;
}
if (cmprts->need_reorder) {
cmprts->reorder();
}
int n_blocks = cmprts->b_mx()[0] * cmprts->b_mx()[1] * cmprts->b_mx()[2] *
cmprts->n_patches();
k_heating_run_foil<BS><<<dimGrid, THREADS_PER_BLOCK>>>(
heating_spot_, *cmprts, heating_dt_, d_xb_by_patch_.data().get(),
d_curand_states_.data().get(), n_blocks);
cuda_sync_if_enabled();
}
// state (FIXME, shouldn't be part of the interface)
bool first_time_;
float heating_dt_;
HS heating_spot_;
psc::device_vector<Float3> d_xb_by_patch_;
psc::device_vector<curandState> d_curand_states_;
};
// ----------------------------------------------------------------------
// particle_kick
__host__ void particle_kick(DParticleCuda& prt, float H, float heating_dt)
{
float2 r01 = bm_normal2();
float2 r23 = bm_normal2();
float Dp = sqrtf(H * heating_dt);
prt.u[0] += Dp * r01.x;
prt.u[1] += Dp * r01.y;
prt.u[2] += Dp * r23.x;
}
// ----------------------------------------------------------------------
// cuda_heating_run_foil_gold
template <typename BS, typename HS>
void cuda_heating_run_foil_gold(HS& foil, float heating_dt,
cuda_mparticles<BS>* cmprts)
{
for (int b = 0; b < cmprts->n_blocks; b++) {
int p = b / cmprts->n_blocks_per_patch;
for (int n = cmprts->d_off[b]; n < cmprts->d_off[b + 1]; n++) {
auto prt = cmprts->storage[n];
float* xb = &cmprts->xb_by_patch[p][0];
float xx[3] = {
prt.x[0] + xb[0],
prt.x[1] + xb[1],
prt.x[2] + xb[2],
};
float H = foil(xx, prt.kind);
// float4 pxi4 = d_pxi4[n];
// printf("%s xx = %g %g %g H = %g px = %g %g %g\n", (H > 0) ? "H" : " ",
// xx[0], xx[1], xx[2], H,
// pxi4.x, pxi4.y, pxi4.z);
// pxi4.w = H;
// d_pxi4[n] = pxi4;
if (H > 0) {
auto prt = cmprts->storage[n];
particle_kick(prt, H, heating_dt);
cmprts->storage.store_momentum(prt, n);
// printf("H xx = %g %g %g H = %g px = %g %g %g\n", xx[0], xx[1], xx[2],
// H,
// pxi4.x, pxi4.y, pxi4.z);
}
}
}
}
// ======================================================================
template <typename HS, typename MP>
HeatingCuda<HS, MP>::HeatingCuda(const Grid_t& grid, int interval,
HS heating_spot)
: foil_{new cuda_heating_foil<HS>{grid, heating_spot, interval * grid.dt}},
balance_generation_cnt_{-1}
{}
template <typename HS, typename MP>
HeatingCuda<HS, MP>::~HeatingCuda()
{
delete foil_;
}
template <typename HS, typename MP>
void HeatingCuda<HS, MP>::reset(const MP& mprts)
{
foil_->reset();
}
template <typename HS, typename MP>
void HeatingCuda<HS, MP>::operator()(MP& mprts)
{
if (psc_balance_generation_cnt > this->balance_generation_cnt_) {
balance_generation_cnt_ = psc_balance_generation_cnt;
reset(mprts);
}
(*foil_)(mprts.cmprts());
}
// ======================================================================
template struct HeatingCuda<HeatingSpotFoil<dim_yz>, MparticlesCuda<BS144>>;
template struct HeatingCuda<HeatingSpotFoil<dim_xyz>, MparticlesCuda<BS444>>;
|
3b920ed41108e2784d57ecb6eb76910ddd1004b1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* This sample implements a separable convolution
* of a 2D image with an arbitrary filter.
*/
#include <stdio.h>
#include <stdlib.h>
#include "gputimer.h"
#include <time.h>
unsigned int filter_radius;
GpuTimer timer;
double overal_time = 0;
clock_t start, end;
double overal_CPU_time;
#define FILTER_LENGTH (2 * filter_radius + 1)
#define ABS(val) ((val)<0.0 ? (-(val)) : (val))
#define accuracy 0.00005
#define cudaCheckError() { \
hipError_t e=hipGetLastError(); \
if(e!=hipSuccess) { \
printf("Cuda failure %s:%d: '%s'\n",__FILE__,__LINE__,hipGetErrorString(e)); \
exit(EXIT_FAILURE); \
} \
}
////////////////////////////////////////////////////////////////////////////////
// Reference row convolution filter
////////////////////////////////////////////////////////////////////////////////
void convolutionRowCPU(float *h_Dst, float *h_Src, float *h_Filter,int imageW, int imageH, int filterR) {
int x, y, k;
for (y = 0; y < imageH; y++) {
for (x = 0; x < imageW; x++) {
float sum = 0;
for (k = -filterR; k <= filterR; k++) {
int d = x + k;
if (d >= 0 && d < imageW) {
sum += h_Src[y * imageW + d] * h_Filter[filterR - k];
}
h_Dst[y * imageW + x] = sum;
}
}
}
}
////////////////////////////////////////////////////////////////////////////////
// Reference column convolution filter
////////////////////////////////////////////////////////////////////////////////
void convolutionColumnCPU(float *h_Dst, float *h_Src, float *h_Filter,int imageW, int imageH, int filterR) {
int x, y, k;
for (y = 0; y < imageH; y++) {
for (x = 0; x < imageW; x++) {
float sum = 0;
for (k = -filterR; k <= filterR; k++) {
int d = y + k;
if (d >= 0 && d < imageH) {
sum += h_Src[d * imageW + x] * h_Filter[filterR - k];
}
h_Dst[y * imageW + x] = sum;
}
}
}
}
////////////////////////////////////////////////////////////////////////////////
// Device code
////////////////////////////////////////////////////////////////////////////////
__global__ void
convolutionRowDevice(float *d_Dst, float *d_Src, float *d_Filter,int imageW, int imageH, int filterR)
{
int k;
int row = blockIdx.x * blockDim.x + threadIdx.x;
int col = blockIdx.y * blockDim.y + threadIdx.y;
float sum = 0;
for (k = -filterR; k <= filterR; k++) {
int d = row + k;
if (d >= 0 && d < imageW) {
//sum += h_Src[y * imageW + d] * h_Filter[filterR - k];
sum += d_Src[col * imageW + d] * d_Filter[filterR - k];
}
//h_Dst[y * imageW + x] = sum;
d_Dst[col * imageW + row] = sum;
}
}
__global__ void
convolutionColumnDevice(float *d_Dst, float *d_Src, float *d_Filter,int imageW, int imageH, int filterR)
{
int k;
int row = blockIdx.x * blockDim.x + threadIdx.x;
int col = blockIdx.y * blockDim.y + threadIdx.y;
float sum = 0;
for (k = -filterR; k <= filterR; k++) {
int d = col + k;
if (d >= 0 && d < imageH) {
//sum += h_Src[d * imageW + x] * h_Filter[filterR - k];
sum += d_Src[d * imageW + row] * d_Filter[filterR -k];
}
//h_Dst[y * imageW + x] = sum;
d_Dst[col * imageW + row] = sum;
}
}
////////////////////////////////////////////////////////////////////////////////
// Main program
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv) {
float
*h_Filter,
*h_Input,
*h_Buffer,
*h_OutputCPU,
*h_OutputGPU;
float
*d_Filter,
*d_Input,
*d_Buffer,
*d_OutputD;
int imageW;
int imageH;
unsigned int N;
unsigned int i;
// The user supplies imageW and imageH, and we assume they are equal,
// i.e. imageW = imageH = N, where N is given by the user.
// For simplicity we assume square images.
if ( argc != 3){
printf("Missmach in argument input \n");
printf("1st argument: Image Size \n 2nd argument: Filter Radius \n");
return 0;
}
filter_radius = atoi(argv[1]);
N = atoi(argv[2]);
imageH = N;
imageW = N;
if ( N < FILTER_LENGTH || N%2 != 0 ){
printf ( "Wrong image size \n");
printf ( "It should be greater than %d and a power of 2 \n", FILTER_LENGTH);
return 0;
}
printf("Image Width x Height = %i x %i\n\n", imageW, imageH);
printf("Allocating host arrays...\n");
// It would be a good idea to also check the result of each malloc...
// Host mallocs
h_Filter = (float *)malloc(FILTER_LENGTH * sizeof(float));
h_Input = (float *)malloc(imageW * imageH * sizeof(float));
h_Buffer = (float *)malloc(imageW * imageH * sizeof(float));
h_OutputCPU = (float *)malloc(imageW * imageH * sizeof(float));
h_OutputGPU = (float *)malloc(imageW * imageH * sizeof(float));
if ( h_Filter == NULL || h_Input == NULL || h_Buffer == NULL || h_OutputCPU == NULL || h_OutputGPU == NULL) {
fprintf(stderr, "Failed to allocate Host matrices!\n");
exit(EXIT_FAILURE);
}
printf("Allocating Device arrays...\n");
// Device mallocs
d_Filter = NULL;
hipMalloc((void **)&d_Filter, FILTER_LENGTH * sizeof(float));
cudaCheckError();
d_Input = NULL;
hipMalloc((void **)&d_Input, imageW * imageH * sizeof(float));
cudaCheckError();
d_Buffer = NULL;
hipMalloc((void **)&d_Buffer, imageW * imageH * sizeof(float));
cudaCheckError();
d_OutputD = NULL;
hipMalloc((void **)&d_OutputD, imageW * imageH * sizeof(float));
cudaCheckError();
// 'h_Filter' is the filter with which the convolution is performed and is
// initialized randomly. 'h_Input' is the image on which the convolution is
// performed, and it is likewise initialized randomly.
printf("Initializing Host arrays...\n");
srand(200);
for (i = 0; i < FILTER_LENGTH; i++) {
h_Filter[i] = (float)(rand() % 16);
}
for (i = 0; i < imageW * imageH; i++) {
h_Input[i] = (float)rand() / ((float)RAND_MAX / 255) + (float)rand() / (float)RAND_MAX;
}
printf("Initializing Device arrays...\n");
// Transfer Data to Device
timer.Start();
hipMemcpy(d_Filter, h_Filter, FILTER_LENGTH * sizeof(float), hipMemcpyHostToDevice);
timer.Stop();
overal_time = overal_time + timer.Elapsed();
cudaCheckError();
timer.Start();
hipMemcpy(d_Input, h_Input, imageW * imageH * sizeof(float), hipMemcpyHostToDevice);
timer.Stop();
overal_time = overal_time + timer.Elapsed();
cudaCheckError();
// The code below is the part that runs on the CPU; the GPU results must be compared against it.
printf("CPU computation...\n");
start = clock();
convolutionRowCPU(h_Buffer, h_Input, h_Filter, imageW, imageH, filter_radius); // row-wise convolution
convolutionColumnCPU(h_OutputCPU, h_Buffer, h_Filter, imageW, imageH, filter_radius); // column-wise convolution
end = clock();
// Compare the GPU and CPU results; if even one value exceeds the accuracy we
// have defined, then we have an error and may choose to terminate the program.
printf("GPU computation...\n");
// Kernel parameters prep
int threadsPerBlock;
if (N >= 32){
threadsPerBlock = 32;
}else{
threadsPerBlock = N;
}
dim3 threads(threadsPerBlock, threadsPerBlock);
int blocksPerGrid;
if ( N>=32){
blocksPerGrid = N/threads.x;
}else{
blocksPerGrid = 1;
}
dim3 grid(blocksPerGrid,blocksPerGrid);
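// Each thread computes one output pixel; for N >= 32 the grid is (N/32) x
// (N/32) blocks of 32 x 32 threads, which tiles the image exactly when N is a
// multiple of 32 (e.g. the powers of two the usage message asks for).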
// convolution by rows device
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid*blocksPerGrid, threadsPerBlock*threadsPerBlock);
timer.Start();
hipLaunchKernelGGL(( convolutionRowDevice), dim3(grid), dim3(threads), 0, 0, d_Buffer, d_Input, d_Filter, imageW, imageH, filter_radius);
timer.Stop();
overal_time = overal_time + timer.Elapsed();
cudaCheckError();
hipDeviceSynchronize();
cudaCheckError();
// convolution by columns device
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid*blocksPerGrid, threadsPerBlock*threadsPerBlock);
timer.Start();
hipLaunchKernelGGL(( convolutionColumnDevice), dim3(grid), dim3(threads), 0, 0, d_OutputD, d_Buffer, d_Filter, imageW, imageH, filter_radius);
timer.Stop();
overal_time = overal_time + timer.Elapsed();
cudaCheckError();
hipDeviceSynchronize();
cudaCheckError();
// Copy the device result vector in device memory to the host result vector
// in host memory
printf("Copy output data from the CUDA device to the host memory\n");
timer.Start();
hipMemcpy(h_OutputGPU, d_OutputD, imageW * imageH * sizeof(float), hipMemcpyDeviceToHost);
timer.Stop();
overal_time = overal_time + timer.Elapsed();
cudaCheckError();
printf("\nComparing the outputs\n");
float max_diff=0, temp;
for (unsigned i = 0; i < imageW * imageH; i++)
{
temp = ABS(h_OutputCPU[i] - h_OutputGPU[i]);
if (max_diff < temp) {
max_diff = temp;
}
if ( max_diff > accuracy){
printf("The accuracy is not good enough\n" );
break;
}
}
printf("Max diff: %g\n\n", max_diff);
printf("Time elapsed on GPU = %g ms\n", overal_time);
overal_CPU_time = (double)(end - start) * 1000.0 / CLOCKS_PER_SEC ;
printf ("Time elapsed on CPU = %g ms\n", overal_CPU_time);
// free all the allocated memory
free(h_OutputCPU);
free(h_Buffer);
free(h_Input);
free(h_Filter);
hipFree(d_OutputD);
cudaCheckError();
hipFree(d_Buffer);
cudaCheckError();
hipFree(d_Input);
cudaCheckError();
hipFree(d_Filter);
cudaCheckError();
// Do a device reset just in case... Remove this comment once you have implemented the CUDA version
hipDeviceReset();
return 0;
}
| 3b920ed41108e2784d57ecb6eb76910ddd1004b1.cu | /*
* This sample implements a separable convolution
* of a 2D image with an arbitrary filter.
*/
#include <stdio.h>
#include <stdlib.h>
#include "gputimer.h"
#include <time.h>
unsigned int filter_radius;
GpuTimer timer;
double overal_time = 0;
clock_t start, end;
double overal_CPU_time;
#define FILTER_LENGTH (2 * filter_radius + 1)
#define ABS(val) ((val)<0.0 ? (-(val)) : (val))
#define accuracy 0.00005
#define cudaCheckError() { \
cudaError_t e=cudaGetLastError(); \
if(e!=cudaSuccess) { \
printf("Cuda failure %s:%d: '%s'\n",__FILE__,__LINE__,cudaGetErrorString(e)); \
exit(EXIT_FAILURE); \
} \
}
////////////////////////////////////////////////////////////////////////////////
// Reference row convolution filter
////////////////////////////////////////////////////////////////////////////////
void convolutionRowCPU(float *h_Dst, float *h_Src, float *h_Filter,int imageW, int imageH, int filterR) {
int x, y, k;
for (y = 0; y < imageH; y++) {
for (x = 0; x < imageW; x++) {
float sum = 0;
for (k = -filterR; k <= filterR; k++) {
int d = x + k;
if (d >= 0 && d < imageW) {
sum += h_Src[y * imageW + d] * h_Filter[filterR - k];
}
h_Dst[y * imageW + x] = sum;
}
}
}
}
////////////////////////////////////////////////////////////////////////////////
// Reference column convolution filter
////////////////////////////////////////////////////////////////////////////////
void convolutionColumnCPU(float *h_Dst, float *h_Src, float *h_Filter,int imageW, int imageH, int filterR) {
int x, y, k;
for (y = 0; y < imageH; y++) {
for (x = 0; x < imageW; x++) {
float sum = 0;
for (k = -filterR; k <= filterR; k++) {
int d = y + k;
if (d >= 0 && d < imageH) {
sum += h_Src[d * imageW + x] * h_Filter[filterR - k];
}
h_Dst[y * imageW + x] = sum;
}
}
}
}
////////////////////////////////////////////////////////////////////////////////
// Device code
////////////////////////////////////////////////////////////////////////////////
__global__ void
convolutionRowDevice(float *d_Dst, float *d_Src, float *d_Filter,int imageW, int imageH, int filterR)
{
int k;
int row = blockIdx.x * blockDim.x + threadIdx.x;
int col = blockIdx.y * blockDim.y + threadIdx.y;
float sum = 0;
for (k = -filterR; k <= filterR; k++) {
int d = row + k;
if (d >= 0 && d < imageW) {
//sum += h_Src[y * imageW + d] * h_Filter[filterR - k];
sum += d_Src[col * imageW + d] * d_Filter[filterR - k];
}
//h_Dst[y * imageW + x] = sum;
d_Dst[col * imageW + row] = sum;
}
}
__global__ void
convolutionColumnDevice(float *d_Dst, float *d_Src, float *d_Filter,int imageW, int imageH, int filterR)
{
int k;
int row = blockIdx.x * blockDim.x + threadIdx.x;
int col = blockIdx.y * blockDim.y + threadIdx.y;
float sum = 0;
for (k = -filterR; k <= filterR; k++) {
int d = col + k;
if (d >= 0 && d < imageH) {
//sum += h_Src[d * imageW + x] * h_Filter[filterR - k];
sum += d_Src[d * imageW + row] * d_Filter[filterR -k];
}
//h_Dst[y * imageW + x] = sum;
d_Dst[col * imageW + row] = sum;
}
}
////////////////////////////////////////////////////////////////////////////////
// Main program
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv) {
float
*h_Filter,
*h_Input,
*h_Buffer,
*h_OutputCPU,
*h_OutputGPU;
float
*d_Filter,
*d_Input,
*d_Buffer,
*d_OutputD;
int imageW;
int imageH;
unsigned int N;
unsigned int i;
// The user supplies imageW and imageH, and we assume they are equal,
// i.e. imageW = imageH = N, where N is given by the user.
// For simplicity we assume square images.
if ( argc != 3){
printf("Missmach in argument input \n");
printf("1st argument: Image Size \n 2nd argument: Filter Radius \n");
return 0;
}
filter_radius = atoi(argv[1]);
N = atoi(argv[2]);
imageH = N;
imageW = N;
if ( N < FILTER_LENGTH || N%2 != 0 ){
printf ( "Wrong image size \n");
printf ( "It should be greater than %d and a power of 2 \n", FILTER_LENGTH);
return 0;
}
printf("Image Width x Height = %i x %i\n\n", imageW, imageH);
printf("Allocating host arrays...\n");
// It would be a good idea to also check the result of each malloc...
// Host mallocs
h_Filter = (float *)malloc(FILTER_LENGTH * sizeof(float));
h_Input = (float *)malloc(imageW * imageH * sizeof(float));
h_Buffer = (float *)malloc(imageW * imageH * sizeof(float));
h_OutputCPU = (float *)malloc(imageW * imageH * sizeof(float));
h_OutputGPU = (float *)malloc(imageW * imageH * sizeof(float));
if ( h_Filter == NULL || h_Input == NULL || h_Buffer == NULL || h_OutputCPU == NULL || h_OutputGPU == NULL) {
fprintf(stderr, "Failed to allocate Host matrices!\n");
exit(EXIT_FAILURE);
}
printf("Allocating Device arrays...\n");
// Device mallocs
d_Filter = NULL;
cudaMalloc((void **)&d_Filter, FILTER_LENGTH * sizeof(float));
cudaCheckError();
d_Input = NULL;
cudaMalloc((void **)&d_Input, imageW * imageH * sizeof(float));
cudaCheckError();
d_Buffer = NULL;
cudaMalloc((void **)&d_Buffer, imageW * imageH * sizeof(float));
cudaCheckError();
d_OutputD = NULL;
cudaMalloc((void **)&d_OutputD, imageW * imageH * sizeof(float));
cudaCheckError();
// 'h_Filter' is the filter with which the convolution is performed and is
// initialized randomly. 'h_Input' is the image on which the convolution is
// performed, and it is likewise initialized randomly.
printf("Initializing Host arrays...\n");
srand(200);
for (i = 0; i < FILTER_LENGTH; i++) {
h_Filter[i] = (float)(rand() % 16);
}
for (i = 0; i < imageW * imageH; i++) {
h_Input[i] = (float)rand() / ((float)RAND_MAX / 255) + (float)rand() / (float)RAND_MAX;
}
printf("Initializing Device arrays...\n");
// Transfer Data to Device
timer.Start();
cudaMemcpy(d_Filter, h_Filter, FILTER_LENGTH * sizeof(float), cudaMemcpyHostToDevice);
timer.Stop();
overal_time = overal_time + timer.Elapsed();
cudaCheckError();
timer.Start();
cudaMemcpy(d_Input, h_Input, imageW * imageH * sizeof(float), cudaMemcpyHostToDevice);
timer.Stop();
overal_time = overal_time + timer.Elapsed();
cudaCheckError();
	// The part below runs on the CPU; the GPU results must be compared against it.
printf("CPU computation...\n");
start = clock();
	convolutionRowCPU(h_Buffer, h_Input, h_Filter, imageW, imageH, filter_radius); // convolution by rows
	convolutionColumnCPU(h_OutputCPU, h_Buffer, h_Filter, imageW, imageH, filter_radius); // convolution by columns
end = clock();
	// Compare the GPU and CPU results; if even one value exceeds the accuracy we
	// have defined, we have an error and may optionally terminate the program.
printf("GPU computation...\n");
	// Kernel parameters prep
int threadsPerBlock;
if (N >= 32){
threadsPerBlock = 32;
}else{
threadsPerBlock = N;
}
dim3 threads(threadsPerBlock, threadsPerBlock);
int blocksPerGrid;
if ( N>=32){
blocksPerGrid = N/threads.x;
}else{
blocksPerGrid = 1;
}
dim3 grid(blocksPerGrid,blocksPerGrid);
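	// NOTE: for N >= 32 the grid covers the image exactly only when N is a multiple
	// of 32; otherwise the trailing rows/columns are left unprocessed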
// convolution by rows device
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid*blocksPerGrid, threadsPerBlock*threadsPerBlock);
timer.Start();
convolutionRowDevice<<<grid, threads>>>(d_Buffer, d_Input, d_Filter, imageW, imageH, filter_radius);
timer.Stop();
overal_time = overal_time + timer.Elapsed();
cudaCheckError();
cudaDeviceSynchronize();
cudaCheckError();
// convolution by columns device
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid*blocksPerGrid, threadsPerBlock*threadsPerBlock);
timer.Start();
convolutionColumnDevice<<<grid, threads>>>(d_OutputD, d_Buffer, d_Filter, imageW, imageH, filter_radius);
timer.Stop();
overal_time = overal_time + timer.Elapsed();
cudaCheckError();
cudaDeviceSynchronize();
cudaCheckError();
// Copy the device result vector in device memory to the host result vector
	// in host memory
printf("Copy output data from the CUDA device to the host memory\n");
timer.Start();
cudaMemcpy(h_OutputGPU, d_OutputD, imageW * imageH * sizeof(float), cudaMemcpyDeviceToHost);
timer.Stop();
overal_time = overal_time + timer.Elapsed();
cudaCheckError();
printf("\nComparing the outputs\n");
float max_diff=0, temp;
for (unsigned i = 0; i < imageW * imageH; i++)
{
temp = ABS(h_OutputCPU[i] - h_OutputGPU[i]);
if (max_diff < temp) {
max_diff = temp;
}
if ( max_diff > accuracy){
printf("The accuracy is not good enough\n" );
break;
}
}
printf("Max diff: %g\n\n", max_diff);
printf("Time elapsed on GPU = %g ms\n", overal_time);
overal_CPU_time = (double)(end - start) * 1000.0 / CLOCKS_PER_SEC ;
printf ("Time elapsed on CPU = %g ms\n", overal_CPU_time);
// free all the allocated memory
free(h_OutputCPU);
free(h_Buffer);
free(h_Input);
free(h_Filter);
cudaFree(d_OutputD);
cudaCheckError();
cudaFree(d_Buffer);
cudaCheckError();
cudaFree(d_Input);
cudaCheckError();
cudaFree(d_Filter);
cudaCheckError();
	// Do a device reset just in case... Remove the comment once you have implemented the CUDA part.
cudaDeviceReset();
return 0;
}
|
32dcbbc7dba7e2095a565fff0f9b811a64a69a7e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/hip/mem_eff_attention/kernel_backward.h>
using namespace PyTorchMemEffAttention;
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, false, true, false, 64, 64, 32>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, false, true, false, 64, 64, 32>::kMinBlocksPerSm)
fmha_cutlassB_f16_notaligned_64x64_k32_dropout_sm50(typename AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, false, true, false, 64, 64, 32>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 500
#if __CUDA_ARCH__ < 700
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, false, true, false, 64, 64, 32>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_notaligned_64x64_k32_dropout_sm50` is for sm50-sm70, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, false, true, false, 64, 64, 32>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, false, true, false, 64, 64, 32>::kMinBlocksPerSm)
fmha_cutlassB_f16_notaligned_64x64_k32_dropout_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, false, true, false, 64, 64, 32>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 700
#if __CUDA_ARCH__ < 750
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, false, true, false, 64, 64, 32>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_notaligned_64x64_k32_dropout_sm70` is for sm70-sm75, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, false, true, false, 64, 64, 32>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, false, true, false, 64, 64, 32>::kMinBlocksPerSm)
fmha_cutlassB_f16_notaligned_64x64_k32_dropout_sm75(typename AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, false, true, false, 64, 64, 32>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 750
#if __CUDA_ARCH__ < 800
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, false, true, false, 64, 64, 32>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_notaligned_64x64_k32_dropout_sm75` is for sm75-sm80, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
| 32dcbbc7dba7e2095a565fff0f9b811a64a69a7e.cu | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h>
using namespace PyTorchMemEffAttention;
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, false, true, false, 64, 64, 32>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, false, true, false, 64, 64, 32>::kMinBlocksPerSm)
fmha_cutlassB_f16_notaligned_64x64_k32_dropout_sm50(typename AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, false, true, false, 64, 64, 32>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 500
#if __CUDA_ARCH__ < 700
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, false, true, false, 64, 64, 32>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_notaligned_64x64_k32_dropout_sm50` is for sm50-sm70, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, false, true, false, 64, 64, 32>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, false, true, false, 64, 64, 32>::kMinBlocksPerSm)
fmha_cutlassB_f16_notaligned_64x64_k32_dropout_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, false, true, false, 64, 64, 32>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 700
#if __CUDA_ARCH__ < 750
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, false, true, false, 64, 64, 32>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_notaligned_64x64_k32_dropout_sm70` is for sm70-sm75, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, false, true, false, 64, 64, 32>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, false, true, false, 64, 64, 32>::kMinBlocksPerSm)
fmha_cutlassB_f16_notaligned_64x64_k32_dropout_sm75(typename AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, false, true, false, 64, 64, 32>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 750
#if __CUDA_ARCH__ < 800
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, false, true, false, 64, 64, 32>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_notaligned_64x64_k32_dropout_sm75` is for sm75-sm80, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
|
ca0cdfa00fb8a073a758496ba6710502182d8598.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "ge_sqrt.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int sd = 1;
const int fd = 1;
const REAL *a = NULL;
hipMalloc(&a, XSIZE*YSIZE);
const int offset_a = 1;
const int ld_a = 1;
REAL *b = NULL;
hipMalloc(&b, XSIZE*YSIZE);
const int offset_b = 1;
const int ld_b = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
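	// round the launch extents up to multiples of the block dimensions so the
	// grid of full blocks covers the whole XSIZE x YSIZE problem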
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
	hipFree(0);
	hipLaunchKernelGGL((ge_sqrt), dim3(gridBlock), dim3(threadBlock), 0, 0, sd, fd, a, offset_a, ld_a, b, offset_b, ld_b);
	hipDeviceSynchronize();
	for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
		hipLaunchKernelGGL((ge_sqrt), dim3(gridBlock), dim3(threadBlock), 0, 0, sd, fd, a, offset_a, ld_a, b, offset_b, ld_b);
	}
	auto start = steady_clock::now();
	for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
		hipLaunchKernelGGL((ge_sqrt), dim3(gridBlock), dim3(threadBlock), 0, 0, sd, fd, a, offset_a, ld_a, b, offset_b, ld_b);
	}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | ca0cdfa00fb8a073a758496ba6710502182d8598.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "ge_sqrt.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int sd = 1;
const int fd = 1;
const REAL *a = NULL;
cudaMalloc(&a, XSIZE*YSIZE);
const int offset_a = 1;
const int ld_a = 1;
REAL *b = NULL;
cudaMalloc(&b, XSIZE*YSIZE);
const int offset_b = 1;
const int ld_b = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
ge_sqrt<<<gridBlock,threadBlock>>>(sd,fd,a,offset_a,ld_a,b,offset_b,ld_b);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
ge_sqrt<<<gridBlock,threadBlock>>>(sd,fd,a,offset_a,ld_a,b,offset_b,ld_b);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
ge_sqrt<<<gridBlock,threadBlock>>>(sd,fd,a,offset_a,ld_a,b,offset_b,ld_b);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
2d4fd2581594bc539c6912f2fa717d26f3959b4a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
 * several useful GPU functions are defined in this file to facilitate
* the extension scheme
******************************************************************************/
typedef struct
{
double sR;
double sL;
} double_eno_derivative;
__device__ inline
double max2(double x, double y)
{
return (x<y) ? y : x;
}
__device__ inline
double min2(double x, double y)
{
return (x<y) ? x : y;
}
__device__ inline
double min_mod(double x, double y)
{
return (x*y<0) ? 0.0 : (fabs(x)<fabs(y) ? x : y);
}
__device__ inline
double min_abs(double x, double y)
{
return (fabs(x)<fabs(y)) ? x : y;
}
__device__ inline
double sign(double x)
{
return (x>0) ? 1.0 : -1.0;
}
// convert subindex to linear index
// out-of-range indices are clamped to the nearest valid node (no wrap-around)
__device__ inline
int sub2ind(int row_idx, int col_idx, int pge_idx, int rows, int cols, int pges)
{
int row_idxn = min2(rows-1, max2(0, row_idx));
int col_idxn = min2(cols-1, max2(0, col_idx));
int pge_idxn = min2(pges-1, max2(0, pge_idx));
int ind = pge_idxn * rows * cols + col_idxn * rows + row_idxn;
return ind;
}
__device__ inline
double weno_onesided_derivative(double v1, double v2, double v3, double v4, double v5)
{
// different choices of ENO derivatives
double phi1 = 1./3. * v1 - 7./6. * v2 + 11./6. * v3;
double phi2 = -1./6. * v2 + 5./6. * v3 + 1./3. * v4;
double phi3 = 1./3. * v3 + 5./6. * v4 - 1./6. * v5;
// smoothness parameter
double S1 = 13./12. * pow((v1 - 2*v2 + v3),2) + 1./4. * pow((v1 - 4*v2 + 3*v3),2);
double S2 = 13./12. * pow((v2 - 2*v3 + v4),2) + 1./4. * pow((v2 - v4),2);
double S3 = 13./12. * pow((v3 - 2*v4 + v5),2) + 1./4. * pow((3*v3 - 4*v4 + v5),2);
double epsilon = 1e-6;
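	// nonlinear WENO weights: alpha_k = d_k / (S_k + epsilon)^2 with ideal weights
	// d = (0.1, 0.6, 0.3); in smooth regions they recover the fifth-order upwind scheme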
double alpha1 = 0.1 / pow( (S1 + epsilon), 2);
double alpha2 = 0.6 / pow( (S2 + epsilon), 2);
double alpha3 = 0.3 / pow( (S3 + epsilon), 2);
// weights for each stencil
double sum = alpha1 + alpha2 + alpha3;
double omega1 = alpha1 / sum;
double omega2 = alpha2 / sum;
double omega3 = alpha3 / sum;
return (omega1*phi1 + omega2*phi2 + omega3*phi3);
}
// given a stencil across the boundary: p1<-l3-p2<-l2-p3<-l1-p4-r1->p5-r2->p6-r3->p7
// create a new stencil (x3m,h3m),(x2m,h2m),(x1m,h1m),(x0,h0),(x1,h1),(x2,h2),(x3,h3) including boundary nodes
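// Example: with ds = 1 and a boundary 0.3 to the right of p4 (r1 = 0.3 < ds),
// the forward half of the stencil becomes (0.3, h_fore), (1, p5), (2, p6),
// i.e. the boundary value h_fore is inserted and the farthest node p7 is dropped;
// when l1 >= ds the backward half stays on the grid: (-1, p3), (-2, p2), (-3, p1).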
__device__ inline
void select_stencil(double & h3m, double & h2m, double & h1m, double & h0, double & h1, double & h2, double & h3, double & x3m, double & x2m, double & x1m, double & x0, double & x1, double & x2, double & x3, double p1, double p2, double p3, double p4, double p5, double p6, double p7, double r1, double r2, double r3, double l1, double l2, double l3, double ds, double h_fore, double h_back)
{
h0 = p4; x0 = 0.0;
if(r1<ds){
x1 = r1;
x2 = ds;
x3 = 2*ds;
h1 = h_fore;
h2 = p5;
h3 = p6;
}else{
x1 = ds;
x2 = 2*ds;
x3 = 3*ds;
h1 = p5;
h2 = p6;
h3 = p7;
}
if(l1<ds){
x1m = -l1;
x2m = - ds;
x3m = - 2*ds;
h1m = h_back;
h2m = p3;
h3m = p2;
}else{
x1m = -ds;
x2m = - 2*ds;
x3m = - 3*ds;
h1m = p3;
h2m = p2;
h3m = p1;
}
}
// for stencil (x3m,h3m),(x2m,h2m),(x1m,h1m),(x0,h0),(x1,h1),(x2,h2),(x3,h3) including boundary nodes
// calculate cubic eno derivatives at (x0,h0)
// note that this is a nonuniform stencil
__device__ inline
void ENO_cubic_derivative(double & d_fore, double & d_back, double h3m, double h2m, double h1m, double h0, double h1, double h2, double h3, double x3m, double x2m, double x1m, double x0, double x1, double x2, double x3)
{
// divided differences
double d1_2_5 = (h3 - h2) / (x3 - x2) ;
double d1_1_5 = (h2 - h1) / (x2 - x1) ;
double d1_0_5 = (h1 - h0) / (x1 - x0) ;
double d1_m0_5 = (h0 - h1m) / (x0 - x1m);
double d1_m1_5 = (h1m - h2m) / (x1m - x2m);
double d1_m2_5 = (h2m - h3m) / (x2m - x3m);
double d2_2 = (d1_2_5 - d1_1_5) / (x3 - x1) ;
double d2_1 = (d1_1_5 - d1_0_5) / (x2 - x0) ;
double d2_0 = (d1_0_5 - d1_m0_5) / (x1 - x1m);
double d2_m1 = (d1_m0_5 - d1_m1_5) / (x0 - x2m);
double d2_m2 = (d1_m1_5 - d1_m2_5) / (x1m - x3m);
double d3_1_5 = (d2_2 - d2_1) / (x3 - x0) ;
double d3_0_5 = (d2_1 - d2_0) / (x2 - x1m);
double d3_m0_5 = (d2_0 - d2_m1) / (x1 - x2m);
double d3_m1_5 = (d2_m1 - d2_m2) / (x0 - x3m);
double a1 = (x0 - x1m) * (x0 - x2m) * min_abs(d3_m0_5, d3_m1_5);
double a2 = (x0 - x1m) * (x0 - x1) * min_abs(d3_m0_5, d3_0_5);
double a = (fabs(d2_m1) < fabs(d2_0)) ? a1 : a2;
double b1 = (x0 - x1m) * (x0 - x1) * min_abs(d3_m0_5, d3_0_5);
double b2 = (x0 - x1) * (x0 - x2) * min_abs(d3_0_5, d3_1_5);
double b = (fabs(d2_0) < fabs(d2_1)) ? b1 : b2;
d_back = d1_m0_5 + min_mod(d2_m1,d2_0) * (x0 - x1m) + a;
d_fore = d1_0_5 + min_mod(d2_0, d2_1) * (x0 - x1) + b;
}
// calculate weno derivative at p4: p1<-l3-p2<-l2-p3<-l1-p4-r1->p5-r2->p6-r3->p7
// where px are level set function values at node x
// lx, rx are distance to the left/right node
__device__ inline
void weno_derivative_boundary(double & d_fore, double & d_back, double p1, double p2, double p3, double p4, double p5, double p6, double p7, double r1, double r2, double r3, double l1, double l2, double l3, double ds, double v_fore, double v_back)
{
	// the condition below is better than p3*p4<0 || p4*p5<0
bool cross_interface = r1<ds || l1<ds;
if(!cross_interface){
double v1 = (p2 - p1) / ds;
double v2 = (p3 - p2) / ds;
double v3 = (p4 - p3) / ds;
double v4 = (p5 - p4) / ds;
double v5 = (p6 - p5) / ds;
double v6 = (p7 - p6) / ds;
d_back = weno_onesided_derivative(v1,v2,v3,v4,v5);
d_fore = weno_onesided_derivative(v6,v5,v4,v3,v2);
}// if not a node IMMEDIATELY adjacent to the boundary, calculate weno derivatives as usual
else{
double h3m,h2m,h1m,h0,h1,h2,h3;
double x3m,x2m,x1m,x0,x1,x2,x3;
select_stencil(h3m,h2m,h1m,h0,h1,h2,h3,x3m,x2m,x1m,x0,x1,x2,x3,p1,p2,p3,p4,p5,p6,p7,r1,r2,r3,l1,l2,l3,ds,v_fore,v_back);
ENO_cubic_derivative(d_fore,d_back,h3m,h2m,h1m,h0,h1,h2,h3,x3m,x2m,x1m,x0,x1,x2,x3);
}// for nodes IMMEDIATELY adjacent to the boundary, use cubic ENO interpolant
}
__device__ inline
double upwind_normal_point(double p1, double p2, double p3, double p4, double p5, double p6, double p7, double r1, double r2, double r3, double l1, double l2, double l3, double ds)
{
double d_back, d_fore;
weno_derivative_boundary(d_fore,d_back,p1,p2,p3,p4,p5,p6,p7,r1,r2,r3,l1,l2,l3,ds,0.0,0.0);
return (fabs(p5)<fabs(p3)) ? d_fore : d_back;
}
// calculate the upwind normal
__global__
void upwind_normal(double * nx, double * ny, double * nz, double const * lsf, double const * xpr, double const * xpl, double const * ypf, double const * ypb, double const * zpu, double const * zpd, int rows, int cols, int pges, double dx, double dy, double dz, int num_ele)
{
int row_idx = blockIdx.x * blockDim.x + threadIdx.x;
int col_idx = blockIdx.y * blockDim.y + threadIdx.y;
int pge_idx = blockIdx.z * blockDim.z + threadIdx.z;
if(row_idx >= rows || col_idx >= cols || pge_idx >= pges){
return;
}
int ind = sub2ind(row_idx, col_idx, pge_idx, rows, cols, pges);
double p1,p2,p3,p4,p5,p6,p7;
double r1,r2,r3,l1,l2,l3;
p4 = lsf[ind];
int rght1 = sub2ind(row_idx, col_idx+1, pge_idx, rows, cols, pges);
int rght2 = sub2ind(row_idx, col_idx+2, pge_idx, rows, cols, pges);
int rght3 = sub2ind(row_idx, col_idx+3, pge_idx, rows, cols, pges);
int left1 = sub2ind(row_idx, col_idx-1, pge_idx, rows, cols, pges);
int left2 = sub2ind(row_idx, col_idx-2, pge_idx, rows, cols, pges);
int left3 = sub2ind(row_idx, col_idx-3, pge_idx, rows, cols, pges);
p1 = lsf[left3];
p2 = lsf[left2];
p3 = lsf[left1];
p5 = lsf[rght1];
p6 = lsf[rght2];
p7 = lsf[rght3];
r1 = xpr[ind];
r2 = xpr[rght1];
r3 = xpr[rght2];
l1 = xpl[ind];
l2 = xpl[left1];
l3 = xpl[left2];
nx[ind] = upwind_normal_point(p1,p2,p3,p4,p5,p6,p7,r1,r2,r3,l1,l2,l3,dx);
int frnt1 = sub2ind(row_idx+1, col_idx, pge_idx, rows, cols, pges);
int frnt2 = sub2ind(row_idx+2, col_idx, pge_idx, rows, cols, pges);
int frnt3 = sub2ind(row_idx+3, col_idx, pge_idx, rows, cols, pges);
int back1 = sub2ind(row_idx-1, col_idx, pge_idx, rows, cols, pges);
int back2 = sub2ind(row_idx-2, col_idx, pge_idx, rows, cols, pges);
int back3 = sub2ind(row_idx-3, col_idx, pge_idx, rows, cols, pges);
p1 = lsf[back3];
p2 = lsf[back2];
p3 = lsf[back1];
p5 = lsf[frnt1];
p6 = lsf[frnt2];
p7 = lsf[frnt3];
r1 = ypf[ind];
r2 = ypf[frnt1];
r3 = ypf[frnt2];
l1 = ypb[ind];
l2 = ypb[back1];
l3 = ypb[back2];
ny[ind] = upwind_normal_point(p1,p2,p3,p4,p5,p6,p7,r1,r2,r3,l1,l2,l3,dy);
int upup1 = sub2ind(row_idx, col_idx, pge_idx+1, rows, cols, pges);
int upup2 = sub2ind(row_idx, col_idx, pge_idx+2, rows, cols, pges);
int upup3 = sub2ind(row_idx, col_idx, pge_idx+3, rows, cols, pges);
int down1 = sub2ind(row_idx, col_idx, pge_idx-1, rows, cols, pges);
int down2 = sub2ind(row_idx, col_idx, pge_idx-2, rows, cols, pges);
int down3 = sub2ind(row_idx, col_idx, pge_idx-3, rows, cols, pges);
p1 = lsf[down3];
p2 = lsf[down2];
p3 = lsf[down1];
p5 = lsf[upup1];
p6 = lsf[upup2];
p7 = lsf[upup3];
r1 = zpu[ind];
r2 = zpu[upup1];
r3 = zpu[upup2];
l1 = zpd[ind];
l2 = zpd[down1];
l3 = zpd[down2];
nz[ind] = upwind_normal_point(p1,p2,p3,p4,p5,p6,p7,r1,r2,r3,l1,l2,l3,dz);
}
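// The coefficients computed below are the Taylor coefficients of the unique cubic
// C(x) = c0 + c1*x + c2*x^2 + c3*x^3 through the equally spaced samples
// (0,v0), (s,v1), (2*s,v2), (3*s,v3); expanding the expressions gives the usual
// one-sided difference formulas
//   c1 = C'(0)     = (-11*v0 + 18*v1 - 9*v2 + 2*v3) / (6*s)
//   c2 = C''(0)/2  = (  2*v0 -  5*v1 + 4*v2 -   v3) / (2*s^2)
//   c3 = C'''(0)/6 = (   -v0 +  3*v1 - 3*v2 +   v3) / (6*s^3)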
__device__ inline
void cubic_interp_coefficient(double & c0, double & c1, double & c2, double & c3, double v0, double v1, double v2, double v3, double s)
{
c0 = v0;
c1 = ( 3.0 * (v1-v0) - 3.0/2.0 * (v2-v0) + 1.0/3.0 * (v3-v0) ) / s;
c2 = (-5.0/2.0 * (v1-v0) + 2.0 * (v2-v0) - 1.0/2.0 * (v3-v0) ) / pow(s,2);
c3 = ( 1.0/2.0 * (v1-v0) - 1.0/2.0 * (v2-v0) + 1.0/6.0 * (v3-v0) ) / pow(s,3);
}
// modify forward/backward(c_f/b) values at v0:[v4,v1,v0,v2,v3]
// if dis_b/f!=ds, then there is boundary nearby, a cubic interpolant is then constructed
// C(x) = c0 + c1*x + c2*x^2 + c3*x^3 through (0,v1),(ds,v0),(2*ds,v2),(3*ds,v3) assuming the boundary is between v0,v2
// and used to calculate c_b/f at boundary crossing nodes
__device__ inline
void cubic_interp(double & c_forward, double & c_backward, double dis_f, double dis_b, double ds, double v4, double v1, double v0, double v2, double v3)
{
c_forward = 0;
c_backward = 0;
double c0,c1,c2,c3; // coefficient for cubic interpolant
// if there is a boundary in the forward direction
if(dis_f!=ds){
cubic_interp_coefficient(c0,c1,c2,c3,v1,v0,v2,v3,ds);
double xc = ds + dis_f; // coordinate of the boundary point
c_forward = c0 + c1 * xc + c2 * pow(xc,2) + c3 * pow(xc,3);
}
// if there is a boundary in the backward direction
if(dis_b!=ds){
cubic_interp_coefficient(c0,c1,c2,c3,v4,v1,v0,v2,ds);
double xc = 2*ds - dis_b;
c_backward = c0 + c1 * xc + c2 * pow(xc,2) + c3 * pow(xc,3);
}
}
// interpolate values at boundary points
__global__
void boundary_interpolate(double * cpr, double * cpl, double * cpf, double * cpb, double * cpu, double * cpd, double const * xpr, double const * xpl, double const * ypf, double const * ypb, double const * zpu, double const * zpd, double const * lsf, int rows, int cols, int pges, double dx, double dy, double dz, int num_ele)
{
int row_idx = blockIdx.x * blockDim.x + threadIdx.x;
int col_idx = blockIdx.y * blockDim.y + threadIdx.y;
int pge_idx = blockIdx.z * blockDim.z + threadIdx.z;
if(row_idx >= rows || col_idx >= cols || pge_idx >= pges){
return;
}
int ind = sub2ind(row_idx, col_idx, pge_idx, rows, cols, pges);
int right = sub2ind(row_idx, col_idx+1, pge_idx, rows, cols, pges);
int right2 = sub2ind(row_idx, col_idx+2, pge_idx, rows, cols, pges);
int left = sub2ind(row_idx, col_idx-1, pge_idx, rows, cols, pges);
int left2 = sub2ind(row_idx, col_idx-2, pge_idx, rows, cols, pges);
cubic_interp(cpr[ind],cpl[ind],xpr[ind],xpl[ind],dx,lsf[left2],lsf[left],lsf[ind],lsf[right],lsf[right2]);
int front = sub2ind(row_idx+1, col_idx, pge_idx, rows, cols, pges);
int front2 = sub2ind(row_idx+2, col_idx, pge_idx, rows, cols, pges);
int back = sub2ind(row_idx-1, col_idx, pge_idx, rows, cols, pges);
int back2 = sub2ind(row_idx-2, col_idx, pge_idx, rows, cols, pges);
cubic_interp(cpf[ind],cpb[ind],ypf[ind],ypb[ind],dy,lsf[back2],lsf[back],lsf[ind],lsf[front],lsf[front2]);
int up = sub2ind(row_idx, col_idx, pge_idx+1, rows, cols, pges);
int up2 = sub2ind(row_idx, col_idx, pge_idx+2, rows, cols, pges);
int down = sub2ind(row_idx, col_idx, pge_idx-1, rows, cols, pges);
int down2 = sub2ind(row_idx, col_idx, pge_idx-2, rows, cols, pges);
cubic_interp(cpu[ind],cpd[ind],zpu[ind],zpd[ind],dz,lsf[down2],lsf[down],lsf[ind],lsf[up],lsf[up2]);
}
// calculate extend step
// now lsf represents a scalar field (not the level set function)
__global__
void extend_step(double * step, double const * deltat, double const * lsf, double const * vx, double const * vy, double const * vz, double const * xpr, double const * xpl, double const * ypf, double const * ypb, double const * zpu, double const * zpd, double const * cpr, double const * cpl, double const * cpf, double const * cpb, double const * cpu, double const * cpd, int rows, int cols, int pges, double dx, double dy, double dz, int num_ele)
{
int row_idx = blockIdx.x * blockDim.x + threadIdx.x;
int col_idx = blockIdx.y * blockDim.y + threadIdx.y;
int pge_idx = blockIdx.z * blockDim.z + threadIdx.z;
if(row_idx >= rows || col_idx >= cols || pge_idx >= pges){
return;
}
int ind = sub2ind(row_idx, col_idx, pge_idx, rows, cols, pges);
double p1,p2,p3,p4,p5,p6,p7;
double r1,r2,r3,l1,l2,l3;
double v_fore, v_back;
p4 = lsf[ind];
int rght1 = sub2ind(row_idx, col_idx+1, pge_idx, rows, cols, pges);
int rght2 = sub2ind(row_idx, col_idx+2, pge_idx, rows, cols, pges);
int rght3 = sub2ind(row_idx, col_idx+3, pge_idx, rows, cols, pges);
int left1 = sub2ind(row_idx, col_idx-1, pge_idx, rows, cols, pges);
int left2 = sub2ind(row_idx, col_idx-2, pge_idx, rows, cols, pges);
int left3 = sub2ind(row_idx, col_idx-3, pge_idx, rows, cols, pges);
p1 = lsf[left3];
p2 = lsf[left2];
p3 = lsf[left1];
p5 = lsf[rght1];
p6 = lsf[rght2];
p7 = lsf[rght3];
r1 = xpr[ind];
r2 = xpr[rght1];
r3 = xpr[rght2];
l1 = xpl[ind];
l2 = xpl[left1];
l3 = xpl[left2];
v_fore = cpr[ind];
v_back = cpl[ind];
double xR, xL;
weno_derivative_boundary(xR,xL,p1,p2,p3,p4,p5,p6,p7,r1,r2,r3,l1,l2,l3,dx,v_fore,v_back);
int frnt1 = sub2ind(row_idx+1, col_idx, pge_idx, rows, cols, pges);
int frnt2 = sub2ind(row_idx+2, col_idx, pge_idx, rows, cols, pges);
int frnt3 = sub2ind(row_idx+3, col_idx, pge_idx, rows, cols, pges);
int back1 = sub2ind(row_idx-1, col_idx, pge_idx, rows, cols, pges);
int back2 = sub2ind(row_idx-2, col_idx, pge_idx, rows, cols, pges);
int back3 = sub2ind(row_idx-3, col_idx, pge_idx, rows, cols, pges);
p1 = lsf[back3];
p2 = lsf[back2];
p3 = lsf[back1];
p5 = lsf[frnt1];
p6 = lsf[frnt2];
p7 = lsf[frnt3];
r1 = ypf[ind];
r2 = ypf[frnt1];
r3 = ypf[frnt2];
l1 = ypb[ind];
l2 = ypb[back1];
l3 = ypb[back2];
v_fore = cpf[ind];
v_back = cpb[ind];
double yF, yB;
weno_derivative_boundary(yF,yB,p1,p2,p3,p4,p5,p6,p7,r1,r2,r3,l1,l2,l3,dy,v_fore,v_back);
int upup1 = sub2ind(row_idx, col_idx, pge_idx+1, rows, cols, pges);
int upup2 = sub2ind(row_idx, col_idx, pge_idx+2, rows, cols, pges);
int upup3 = sub2ind(row_idx, col_idx, pge_idx+3, rows, cols, pges);
int down1 = sub2ind(row_idx, col_idx, pge_idx-1, rows, cols, pges);
int down2 = sub2ind(row_idx, col_idx, pge_idx-2, rows, cols, pges);
int down3 = sub2ind(row_idx, col_idx, pge_idx-3, rows, cols, pges);
p1 = lsf[down3];
p2 = lsf[down2];
p3 = lsf[down1];
p5 = lsf[upup1];
p6 = lsf[upup2];
p7 = lsf[upup3];
r1 = zpu[ind];
r2 = zpu[upup1];
r3 = zpu[upup2];
l1 = zpd[ind];
l2 = zpd[down1];
l3 = zpd[down2];
v_fore = cpu[ind];
v_back = cpd[ind];
double zU, zD;
weno_derivative_boundary(zU,zD,p1,p2,p3,p4,p5,p6,p7,r1,r2,r3,l1,l2,l3,dz,v_fore,v_back);
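	// upwinding: where a velocity component is positive the backward (L/B/D)
	// derivative is used, where it is negative the forward (R/F/U) one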
step[ind] = (min2(0,vx[ind]) * xR + max2(0,vx[ind]) * xL +
min2(0,vy[ind]) * yF + max2(0,vy[ind]) * yB +
min2(0,vz[ind]) * zU + max2(0,vz[ind]) * zD ) * deltat[ind];
}
| 2d4fd2581594bc539c6912f2fa717d26f3959b4a.cu | /*******************************************************************************
 * several useful GPU functions are defined in this file to facilitate
* the extension scheme
******************************************************************************/
typedef struct
{
double sR;
double sL;
} double_eno_derivative;
__device__ inline
double max2(double x, double y)
{
return (x<y) ? y : x;
}
__device__ inline
double min2(double x, double y)
{
return (x<y) ? x : y;
}
__device__ inline
double min_mod(double x, double y)
{
return (x*y<0) ? 0.0 : (fabs(x)<fabs(y) ? x : y);
}
__device__ inline
double min_abs(double x, double y)
{
return (fabs(x)<fabs(y)) ? x : y;
}
__device__ inline
double sign(double x)
{
return (x>0) ? 1.0 : -1.0;
}
// convert subindex to linear index
// out-of-range indices are clamped to the nearest valid node (no wrap-around)
__device__ inline
int sub2ind(int row_idx, int col_idx, int pge_idx, int rows, int cols, int pges)
{
int row_idxn = min2(rows-1, max2(0, row_idx));
int col_idxn = min2(cols-1, max2(0, col_idx));
int pge_idxn = min2(pges-1, max2(0, pge_idx));
int ind = pge_idxn * rows * cols + col_idxn * rows + row_idxn;
return ind;
}
__device__ inline
double weno_onesided_derivative(double v1, double v2, double v3, double v4, double v5)
{
// different choices of ENO derivatives
double phi1 = 1./3. * v1 - 7./6. * v2 + 11./6. * v3;
double phi2 = -1./6. * v2 + 5./6. * v3 + 1./3. * v4;
double phi3 = 1./3. * v3 + 5./6. * v4 - 1./6. * v5;
// smoothness parameter
double S1 = 13./12. * pow((v1 - 2*v2 + v3),2) + 1./4. * pow((v1 - 4*v2 + 3*v3),2);
double S2 = 13./12. * pow((v2 - 2*v3 + v4),2) + 1./4. * pow((v2 - v4),2);
double S3 = 13./12. * pow((v3 - 2*v4 + v5),2) + 1./4. * pow((3*v3 - 4*v4 + v5),2);
double epsilon = 1e-6;
double alpha1 = 0.1 / pow( (S1 + epsilon), 2);
double alpha2 = 0.6 / pow( (S2 + epsilon), 2);
double alpha3 = 0.3 / pow( (S3 + epsilon), 2);
// weights for each stencil
double sum = alpha1 + alpha2 + alpha3;
double omega1 = alpha1 / sum;
double omega2 = alpha2 / sum;
double omega3 = alpha3 / sum;
return (omega1*phi1 + omega2*phi2 + omega3*phi3);
}
// given a stencil across the boundary: p1<-l3-p2<-l2-p3<-l1-p4-r1->p5-r2->p6-r3->p7
// create a new stencil (x3m,h3m),(x2m,h2m),(x1m,h1m),(x0,h0),(x1,h1),(x2,h2),(x3,h3) including boundary nodes
__device__ inline
void select_stencil(double & h3m, double & h2m, double & h1m, double & h0, double & h1, double & h2, double & h3, double & x3m, double & x2m, double & x1m, double & x0, double & x1, double & x2, double & x3, double p1, double p2, double p3, double p4, double p5, double p6, double p7, double r1, double r2, double r3, double l1, double l2, double l3, double ds, double h_fore, double h_back)
{
h0 = p4; x0 = 0.0;
if(r1<ds){
x1 = r1;
x2 = ds;
x3 = 2*ds;
h1 = h_fore;
h2 = p5;
h3 = p6;
}else{
x1 = ds;
x2 = 2*ds;
x3 = 3*ds;
h1 = p5;
h2 = p6;
h3 = p7;
}
if(l1<ds){
x1m = -l1;
x2m = - ds;
x3m = - 2*ds;
h1m = h_back;
h2m = p3;
h3m = p2;
}else{
x1m = -ds;
x2m = - 2*ds;
x3m = - 3*ds;
h1m = p3;
h2m = p2;
h3m = p1;
}
}
// for stencil (x3m,h3m),(x2m,h2m),(x1m,h1m),(x0,h0),(x1,h1),(x2,h2),(x3,h3) including boundary nodes
// calculate cubic eno derivatives at (x0,h0)
// note that this is a nonuniform stencil
__device__ inline
void ENO_cubic_derivative(double & d_fore, double & d_back, double h3m, double h2m, double h1m, double h0, double h1, double h2, double h3, double x3m, double x2m, double x1m, double x0, double x1, double x2, double x3)
{
// divided differences
double d1_2_5 = (h3 - h2) / (x3 - x2) ;
double d1_1_5 = (h2 - h1) / (x2 - x1) ;
double d1_0_5 = (h1 - h0) / (x1 - x0) ;
double d1_m0_5 = (h0 - h1m) / (x0 - x1m);
double d1_m1_5 = (h1m - h2m) / (x1m - x2m);
double d1_m2_5 = (h2m - h3m) / (x2m - x3m);
double d2_2 = (d1_2_5 - d1_1_5) / (x3 - x1) ;
double d2_1 = (d1_1_5 - d1_0_5) / (x2 - x0) ;
double d2_0 = (d1_0_5 - d1_m0_5) / (x1 - x1m);
double d2_m1 = (d1_m0_5 - d1_m1_5) / (x0 - x2m);
double d2_m2 = (d1_m1_5 - d1_m2_5) / (x1m - x3m);
double d3_1_5 = (d2_2 - d2_1) / (x3 - x0) ;
double d3_0_5 = (d2_1 - d2_0) / (x2 - x1m);
double d3_m0_5 = (d2_0 - d2_m1) / (x1 - x2m);
double d3_m1_5 = (d2_m1 - d2_m2) / (x0 - x3m);
double a1 = (x0 - x1m) * (x0 - x2m) * min_abs(d3_m0_5, d3_m1_5);
double a2 = (x0 - x1m) * (x0 - x1) * min_abs(d3_m0_5, d3_0_5);
double a = (fabs(d2_m1) < fabs(d2_0)) ? a1 : a2;
double b1 = (x0 - x1m) * (x0 - x1) * min_abs(d3_m0_5, d3_0_5);
double b2 = (x0 - x1) * (x0 - x2) * min_abs(d3_0_5, d3_1_5);
double b = (fabs(d2_0) < fabs(d2_1)) ? b1 : b2;
d_back = d1_m0_5 + min_mod(d2_m1,d2_0) * (x0 - x1m) + a;
d_fore = d1_0_5 + min_mod(d2_0, d2_1) * (x0 - x1) + b;
}
// calculate weno derivative at p4: p1<-l3-p2<-l2-p3<-l1-p4-r1->p5-r2->p6-r3->p7
// where px are level set function values at node x
// lx, rx are distance to the left/right node
__device__ inline
void weno_derivative_boundary(double & d_fore, double & d_back, double p1, double p2, double p3, double p4, double p5, double p6, double p7, double r1, double r2, double r3, double l1, double l2, double l3, double ds, double v_fore, double v_back)
{
	// the condition below is better than p3*p4<0 || p4*p5<0
bool cross_interface = r1<ds || l1<ds;
if(!cross_interface){
double v1 = (p2 - p1) / ds;
double v2 = (p3 - p2) / ds;
double v3 = (p4 - p3) / ds;
double v4 = (p5 - p4) / ds;
double v5 = (p6 - p5) / ds;
double v6 = (p7 - p6) / ds;
d_back = weno_onesided_derivative(v1,v2,v3,v4,v5);
d_fore = weno_onesided_derivative(v6,v5,v4,v3,v2);
}// if not a node IMMEDIATELY adjacent to the boundary, calculate weno derivatives as usual
else{
double h3m,h2m,h1m,h0,h1,h2,h3;
double x3m,x2m,x1m,x0,x1,x2,x3;
select_stencil(h3m,h2m,h1m,h0,h1,h2,h3,x3m,x2m,x1m,x0,x1,x2,x3,p1,p2,p3,p4,p5,p6,p7,r1,r2,r3,l1,l2,l3,ds,v_fore,v_back);
ENO_cubic_derivative(d_fore,d_back,h3m,h2m,h1m,h0,h1,h2,h3,x3m,x2m,x1m,x0,x1,x2,x3);
}// for nodes IMMEDIATELY adjacent to the boundary, use cubic ENO interpolant
}
__device__ inline
double upwind_normal_point(double p1, double p2, double p3, double p4, double p5, double p6, double p7, double r1, double r2, double r3, double l1, double l2, double l3, double ds)
{
double d_back, d_fore;
weno_derivative_boundary(d_fore,d_back,p1,p2,p3,p4,p5,p6,p7,r1,r2,r3,l1,l2,l3,ds,0.0,0.0);
return (fabs(p5)<fabs(p3)) ? d_fore : d_back;
}
// calculate the upwind normal
__global__
void upwind_normal(double * nx, double * ny, double * nz, double const * lsf, double const * xpr, double const * xpl, double const * ypf, double const * ypb, double const * zpu, double const * zpd, int rows, int cols, int pges, double dx, double dy, double dz, int num_ele)
{
int row_idx = blockIdx.x * blockDim.x + threadIdx.x;
int col_idx = blockIdx.y * blockDim.y + threadIdx.y;
int pge_idx = blockIdx.z * blockDim.z + threadIdx.z;
if(row_idx >= rows || col_idx >= cols || pge_idx >= pges){
return;
}
int ind = sub2ind(row_idx, col_idx, pge_idx, rows, cols, pges);
double p1,p2,p3,p4,p5,p6,p7;
double r1,r2,r3,l1,l2,l3;
p4 = lsf[ind];
int rght1 = sub2ind(row_idx, col_idx+1, pge_idx, rows, cols, pges);
int rght2 = sub2ind(row_idx, col_idx+2, pge_idx, rows, cols, pges);
int rght3 = sub2ind(row_idx, col_idx+3, pge_idx, rows, cols, pges);
int left1 = sub2ind(row_idx, col_idx-1, pge_idx, rows, cols, pges);
int left2 = sub2ind(row_idx, col_idx-2, pge_idx, rows, cols, pges);
int left3 = sub2ind(row_idx, col_idx-3, pge_idx, rows, cols, pges);
p1 = lsf[left3];
p2 = lsf[left2];
p3 = lsf[left1];
p5 = lsf[rght1];
p6 = lsf[rght2];
p7 = lsf[rght3];
r1 = xpr[ind];
r2 = xpr[rght1];
r3 = xpr[rght2];
l1 = xpl[ind];
l2 = xpl[left1];
l3 = xpl[left2];
nx[ind] = upwind_normal_point(p1,p2,p3,p4,p5,p6,p7,r1,r2,r3,l1,l2,l3,dx);
int frnt1 = sub2ind(row_idx+1, col_idx, pge_idx, rows, cols, pges);
int frnt2 = sub2ind(row_idx+2, col_idx, pge_idx, rows, cols, pges);
int frnt3 = sub2ind(row_idx+3, col_idx, pge_idx, rows, cols, pges);
int back1 = sub2ind(row_idx-1, col_idx, pge_idx, rows, cols, pges);
int back2 = sub2ind(row_idx-2, col_idx, pge_idx, rows, cols, pges);
int back3 = sub2ind(row_idx-3, col_idx, pge_idx, rows, cols, pges);
p1 = lsf[back3];
p2 = lsf[back2];
p3 = lsf[back1];
p5 = lsf[frnt1];
p6 = lsf[frnt2];
p7 = lsf[frnt3];
r1 = ypf[ind];
r2 = ypf[frnt1];
r3 = ypf[frnt2];
l1 = ypb[ind];
l2 = ypb[back1];
l3 = ypb[back2];
ny[ind] = upwind_normal_point(p1,p2,p3,p4,p5,p6,p7,r1,r2,r3,l1,l2,l3,dy);
int upup1 = sub2ind(row_idx, col_idx, pge_idx+1, rows, cols, pges);
int upup2 = sub2ind(row_idx, col_idx, pge_idx+2, rows, cols, pges);
int upup3 = sub2ind(row_idx, col_idx, pge_idx+3, rows, cols, pges);
int down1 = sub2ind(row_idx, col_idx, pge_idx-1, rows, cols, pges);
int down2 = sub2ind(row_idx, col_idx, pge_idx-2, rows, cols, pges);
int down3 = sub2ind(row_idx, col_idx, pge_idx-3, rows, cols, pges);
p1 = lsf[down3];
p2 = lsf[down2];
p3 = lsf[down1];
p5 = lsf[upup1];
p6 = lsf[upup2];
p7 = lsf[upup3];
r1 = zpu[ind];
r2 = zpu[upup1];
r3 = zpu[upup2];
l1 = zpd[ind];
l2 = zpd[down1];
l3 = zpd[down2];
nz[ind] = upwind_normal_point(p1,p2,p3,p4,p5,p6,p7,r1,r2,r3,l1,l2,l3,dz);
}
__device__ inline
void cubic_interp_coefficient(double & c0, double & c1, double & c2, double & c3, double v0, double v1, double v2, double v3, double s)
{
c0 = v0;
c1 = ( 3.0 * (v1-v0) - 3.0/2.0 * (v2-v0) + 1.0/3.0 * (v3-v0) ) / s;
c2 = (-5.0/2.0 * (v1-v0) + 2.0 * (v2-v0) - 1.0/2.0 * (v3-v0) ) / pow(s,2);
c3 = ( 1.0/2.0 * (v1-v0) - 1.0/2.0 * (v2-v0) + 1.0/6.0 * (v3-v0) ) / pow(s,3);
}
// modify forward/backward(c_f/b) values at v0:[v4,v1,v0,v2,v3]
// if dis_b/f!=ds, then there is boundary nearby, a cubic interpolant is then constructed
// C(x) = c0 + c1*x + c2*x^2 + c3*x^3 through (0,v1),(ds,v0),(2*ds,v2),(3*ds,v3) assuming the boundary is between v0,v2
// and used to calculate c_b/f at boundary crossing nodes
__device__ inline
void cubic_interp(double & c_forward, double & c_backward, double dis_f, double dis_b, double ds, double v4, double v1, double v0, double v2, double v3)
{
c_forward = 0;
c_backward = 0;
double c0,c1,c2,c3; // coefficient for cubic interpolant
// if there is a boundary in the forward direction
if(dis_f!=ds){
cubic_interp_coefficient(c0,c1,c2,c3,v1,v0,v2,v3,ds);
double xc = ds + dis_f; // coordinate of the boundary point
c_forward = c0 + c1 * xc + c2 * pow(xc,2) + c3 * pow(xc,3);
}
// if there is a boundary in the backward direction
if(dis_b!=ds){
cubic_interp_coefficient(c0,c1,c2,c3,v4,v1,v0,v2,ds);
double xc = 2*ds - dis_b;
c_backward = c0 + c1 * xc + c2 * pow(xc,2) + c3 * pow(xc,3);
}
}
// interpolate values at boundary points
__global__
void boundary_interpolate(double * cpr, double * cpl, double * cpf, double * cpb, double * cpu, double * cpd, double const * xpr, double const * xpl, double const * ypf, double const * ypb, double const * zpu, double const * zpd, double const * lsf, int rows, int cols, int pges, double dx, double dy, double dz, int num_ele)
{
int row_idx = blockIdx.x * blockDim.x + threadIdx.x;
int col_idx = blockIdx.y * blockDim.y + threadIdx.y;
int pge_idx = blockIdx.z * blockDim.z + threadIdx.z;
if(row_idx >= rows || col_idx >= cols || pge_idx >= pges){
return;
}
int ind = sub2ind(row_idx, col_idx, pge_idx, rows, cols, pges);
int right = sub2ind(row_idx, col_idx+1, pge_idx, rows, cols, pges);
int right2 = sub2ind(row_idx, col_idx+2, pge_idx, rows, cols, pges);
int left = sub2ind(row_idx, col_idx-1, pge_idx, rows, cols, pges);
int left2 = sub2ind(row_idx, col_idx-2, pge_idx, rows, cols, pges);
cubic_interp(cpr[ind],cpl[ind],xpr[ind],xpl[ind],dx,lsf[left2],lsf[left],lsf[ind],lsf[right],lsf[right2]);
int front = sub2ind(row_idx+1, col_idx, pge_idx, rows, cols, pges);
int front2 = sub2ind(row_idx+2, col_idx, pge_idx, rows, cols, pges);
int back = sub2ind(row_idx-1, col_idx, pge_idx, rows, cols, pges);
int back2 = sub2ind(row_idx-2, col_idx, pge_idx, rows, cols, pges);
cubic_interp(cpf[ind],cpb[ind],ypf[ind],ypb[ind],dy,lsf[back2],lsf[back],lsf[ind],lsf[front],lsf[front2]);
int up = sub2ind(row_idx, col_idx, pge_idx+1, rows, cols, pges);
int up2 = sub2ind(row_idx, col_idx, pge_idx+2, rows, cols, pges);
int down = sub2ind(row_idx, col_idx, pge_idx-1, rows, cols, pges);
int down2 = sub2ind(row_idx, col_idx, pge_idx-2, rows, cols, pges);
cubic_interp(cpu[ind],cpd[ind],zpu[ind],zpd[ind],dz,lsf[down2],lsf[down],lsf[ind],lsf[up],lsf[up2]);
}
// calculate extend step
// now lsf represents a scalar field (not the level set function)
__global__
void extend_step(double * step, double const * deltat, double const * lsf, double const * vx, double const * vy, double const * vz, double const * xpr, double const * xpl, double const * ypf, double const * ypb, double const * zpu, double const * zpd, double const * cpr, double const * cpl, double const * cpf, double const * cpb, double const * cpu, double const * cpd, int rows, int cols, int pges, double dx, double dy, double dz, int num_ele)
{
int row_idx = blockIdx.x * blockDim.x + threadIdx.x;
int col_idx = blockIdx.y * blockDim.y + threadIdx.y;
int pge_idx = blockIdx.z * blockDim.z + threadIdx.z;
if(row_idx >= rows || col_idx >= cols || pge_idx >= pges){
return;
}
int ind = sub2ind(row_idx, col_idx, pge_idx, rows, cols, pges);
double p1,p2,p3,p4,p5,p6,p7;
double r1,r2,r3,l1,l2,l3;
double v_fore, v_back;
p4 = lsf[ind];
int rght1 = sub2ind(row_idx, col_idx+1, pge_idx, rows, cols, pges);
int rght2 = sub2ind(row_idx, col_idx+2, pge_idx, rows, cols, pges);
int rght3 = sub2ind(row_idx, col_idx+3, pge_idx, rows, cols, pges);
int left1 = sub2ind(row_idx, col_idx-1, pge_idx, rows, cols, pges);
int left2 = sub2ind(row_idx, col_idx-2, pge_idx, rows, cols, pges);
int left3 = sub2ind(row_idx, col_idx-3, pge_idx, rows, cols, pges);
p1 = lsf[left3];
p2 = lsf[left2];
p3 = lsf[left1];
p5 = lsf[rght1];
p6 = lsf[rght2];
p7 = lsf[rght3];
r1 = xpr[ind];
r2 = xpr[rght1];
r3 = xpr[rght2];
l1 = xpl[ind];
l2 = xpl[left1];
l3 = xpl[left2];
v_fore = cpr[ind];
v_back = cpl[ind];
double xR, xL;
weno_derivative_boundary(xR,xL,p1,p2,p3,p4,p5,p6,p7,r1,r2,r3,l1,l2,l3,dx,v_fore,v_back);
int frnt1 = sub2ind(row_idx+1, col_idx, pge_idx, rows, cols, pges);
int frnt2 = sub2ind(row_idx+2, col_idx, pge_idx, rows, cols, pges);
int frnt3 = sub2ind(row_idx+3, col_idx, pge_idx, rows, cols, pges);
int back1 = sub2ind(row_idx-1, col_idx, pge_idx, rows, cols, pges);
int back2 = sub2ind(row_idx-2, col_idx, pge_idx, rows, cols, pges);
int back3 = sub2ind(row_idx-3, col_idx, pge_idx, rows, cols, pges);
p1 = lsf[back3];
p2 = lsf[back2];
p3 = lsf[back1];
p5 = lsf[frnt1];
p6 = lsf[frnt2];
p7 = lsf[frnt3];
r1 = ypf[ind];
r2 = ypf[frnt1];
r3 = ypf[frnt2];
l1 = ypb[ind];
l2 = ypb[back1];
l3 = ypb[back2];
v_fore = cpf[ind];
v_back = cpb[ind];
double yF, yB;
weno_derivative_boundary(yF,yB,p1,p2,p3,p4,p5,p6,p7,r1,r2,r3,l1,l2,l3,dy,v_fore,v_back);
int upup1 = sub2ind(row_idx, col_idx, pge_idx+1, rows, cols, pges);
int upup2 = sub2ind(row_idx, col_idx, pge_idx+2, rows, cols, pges);
int upup3 = sub2ind(row_idx, col_idx, pge_idx+3, rows, cols, pges);
int down1 = sub2ind(row_idx, col_idx, pge_idx-1, rows, cols, pges);
int down2 = sub2ind(row_idx, col_idx, pge_idx-2, rows, cols, pges);
int down3 = sub2ind(row_idx, col_idx, pge_idx-3, rows, cols, pges);
p1 = lsf[down3];
p2 = lsf[down2];
p3 = lsf[down1];
p5 = lsf[upup1];
p6 = lsf[upup2];
p7 = lsf[upup3];
r1 = zpu[ind];
r2 = zpu[upup1];
r3 = zpu[upup2];
l1 = zpd[ind];
l2 = zpd[down1];
l3 = zpd[down2];
v_fore = cpu[ind];
v_back = cpd[ind];
double zU, zD;
weno_derivative_boundary(zU,zD,p1,p2,p3,p4,p5,p6,p7,r1,r2,r3,l1,l2,l3,dz,v_fore,v_back);
step[ind] = (min2(0,vx[ind]) * xR + max2(0,vx[ind]) * xL +
min2(0,vy[ind]) * yF + max2(0,vy[ind]) * yB +
min2(0,vz[ind]) * zU + max2(0,vz[ind]) * zD ) * deltat[ind];
}
|
152c910a9de19f6854dd44bd25fe6d4afe5d9e98.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cuda_plugin_helper.h"
using namespace himan;
template <typename T>
__global__ void TransformerKernel(const T* __restrict__ d_source, T* __restrict__ d_dest, double scale, double base,
size_t N)
{
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N)
{
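		// fused multiply-add: d_dest[idx] = d_source[idx] * scale + base with a single rounding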
d_dest[idx] = __fma_rn(d_source[idx], static_cast<T>(scale), static_cast<T>(base));
}
}
namespace transformergpu
{
template <typename T>
void Process(std::shared_ptr<const himan::plugin_configuration> conf, std::shared_ptr<info<T>> myTargetInfo,
std::shared_ptr<info<T>> sourceInfo, double scale, double base)
{
hipStream_t stream;
CUDA_CHECK(hipStreamCreate(&stream));
const size_t N = myTargetInfo->SizeLocations();
size_t memsize = N * sizeof(T);
// Allocate device arrays
T *d_source = 0, *d_dest = 0;
// Allocate memory on device
CUDA_CHECK(hipMalloc((void**)&d_source, memsize));
CUDA_CHECK(hipMalloc((void**)&d_dest, memsize));
// Copy data to device
cuda::PrepareInfo(sourceInfo, d_source, stream, conf->UseCacheForReads());
// dims
const int blockSize = 512;
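	// ceil(N / blockSize): add one block when N is not an exact multiple of blockSize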
const int gridSize = N / blockSize + (N % blockSize == 0 ? 0 : 1);
CUDA_CHECK(hipStreamSynchronize(stream));
hipLaunchKernelGGL(( TransformerKernel<T>), dim3(gridSize), dim3(blockSize), 0, stream, d_source, d_dest, scale, base, N);
cuda::ReleaseInfo(myTargetInfo, d_dest, stream);
// block until the stream has completed
CUDA_CHECK(hipStreamSynchronize(stream));
CUDA_CHECK(hipFree(d_source));
CUDA_CHECK(hipFree(d_dest));
hipStreamDestroy(stream);
}
template void Process(std::shared_ptr<const himan::plugin_configuration>, std::shared_ptr<info<double>>,
std::shared_ptr<info<double>>, double, double);
template void Process(std::shared_ptr<const himan::plugin_configuration>, std::shared_ptr<info<float>>,
std::shared_ptr<info<float>>, double, double);
} // namespace transformergpu
| 152c910a9de19f6854dd44bd25fe6d4afe5d9e98.cu | #include "cuda_plugin_helper.h"
using namespace himan;
template <typename T>
__global__ void TransformerKernel(const T* __restrict__ d_source, T* __restrict__ d_dest, double scale, double base,
size_t N)
{
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N)
{
d_dest[idx] = __fma_rn(d_source[idx], static_cast<T>(scale), static_cast<T>(base));
}
}
namespace transformergpu
{
template <typename T>
void Process(std::shared_ptr<const himan::plugin_configuration> conf, std::shared_ptr<info<T>> myTargetInfo,
std::shared_ptr<info<T>> sourceInfo, double scale, double base)
{
cudaStream_t stream;
CUDA_CHECK(cudaStreamCreate(&stream));
const size_t N = myTargetInfo->SizeLocations();
size_t memsize = N * sizeof(T);
// Allocate device arrays
T *d_source = 0, *d_dest = 0;
// Allocate memory on device
CUDA_CHECK(cudaMalloc((void**)&d_source, memsize));
CUDA_CHECK(cudaMalloc((void**)&d_dest, memsize));
// Copy data to device
cuda::PrepareInfo(sourceInfo, d_source, stream, conf->UseCacheForReads());
// dims
const int blockSize = 512;
const int gridSize = N / blockSize + (N % blockSize == 0 ? 0 : 1);
CUDA_CHECK(cudaStreamSynchronize(stream));
TransformerKernel<T><<<gridSize, blockSize, 0, stream>>>(d_source, d_dest, scale, base, N);
cuda::ReleaseInfo(myTargetInfo, d_dest, stream);
// block until the stream has completed
CUDA_CHECK(cudaStreamSynchronize(stream));
CUDA_CHECK(cudaFree(d_source));
CUDA_CHECK(cudaFree(d_dest));
cudaStreamDestroy(stream);
}
template void Process(std::shared_ptr<const himan::plugin_configuration>, std::shared_ptr<info<double>>,
std::shared_ptr<info<double>>, double, double);
template void Process(std::shared_ptr<const himan::plugin_configuration>, std::shared_ptr<info<float>>,
std::shared_ptr<info<float>>, double, double);
} // namespace transformergpu
|
9fee91a5b0b67116366b0737f5ac103563a7e5f6.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuml/linear_model/ols_mg.hpp>
#include <cuml/linear_model/preprocess_mg.hpp>
#include <opg/linalg/lstsq.hpp>
#include <opg/stats/mean.hpp>
#include <raft/comms/comms.hpp>
#include <raft/cuda_utils.cuh>
#include <raft/linalg/add.cuh>
#include <raft/linalg/gemm.cuh>
#include <raft/matrix/math.cuh>
#include <raft/matrix/matrix.cuh>
#include <raft/mr/device/allocator.hpp>
#include <raft/mr/host/allocator.hpp>
#include <rmm/device_uvector.hpp>
#include <cstddef>
using namespace MLCommon;
namespace ML {
namespace OLS {
namespace opg {
template <typename T>
void fit_impl(raft::handle_t& handle,
std::vector<Matrix::Data<T>*>& input_data,
Matrix::PartDescriptor& input_desc,
std::vector<Matrix::Data<T>*>& labels,
T* coef,
T* intercept,
bool fit_intercept,
bool normalize,
int algo,
hipStream_t* streams,
int n_streams,
bool verbose)
{
rmm::device_uvector<T> mu_input(0, streams[0]);
rmm::device_uvector<T> norm2_input(0, streams[0]);
rmm::device_uvector<T> mu_labels(0, streams[0]);
if (fit_intercept) {
mu_input.resize(input_desc.N, streams[0]);
mu_labels.resize(1, streams[0]);
if (normalize) { norm2_input.resize(input_desc.N, streams[0]); }
GLM::opg::preProcessData(handle,
input_data,
input_desc,
labels,
mu_input.data(),
mu_labels.data(),
norm2_input.data(),
fit_intercept,
normalize,
streams,
n_streams,
verbose);
}
if (algo == 0 || input_desc.N == 1) {
ASSERT(false, "olsFit: no algorithm with this id has been implemented");
} else if (algo == 1) {
LinAlg::opg::lstsqEig(handle, input_data, input_desc, labels, coef, streams, n_streams);
} else {
ASSERT(false, "olsFit: no algorithm with this id has been implemented");
}
if (fit_intercept) {
GLM::opg::postProcessData(handle,
input_data,
input_desc,
labels,
coef,
intercept,
mu_input.data(),
mu_labels.data(),
norm2_input.data(),
fit_intercept,
normalize,
streams,
n_streams,
verbose);
} else {
*intercept = T(0);
}
}
/**
* @brief performs MNMG fit operation for the ols
* @input param handle: the internal cuml handle object
 * @input param input_data: input data partitions owned by this rank
 * @input param input_desc: descriptor of how the input data is partitioned across ranks
* @input param labels: labels data
* @output param coef: learned regression coefficients
* @output param intercept: intercept value
* @input param fit_intercept: fit intercept or not
* @input param normalize: normalize the data or not
* @input param verbose
*/
template <typename T>
void fit_impl(raft::handle_t& handle,
std::vector<Matrix::Data<T>*>& input_data,
Matrix::PartDescriptor& input_desc,
std::vector<Matrix::Data<T>*>& labels,
T* coef,
T* intercept,
bool fit_intercept,
bool normalize,
int algo,
bool verbose)
{
int rank = handle.get_comms().get_rank();
// TODO: These streams should come from raft::handle_t
int n_streams = input_desc.blocksOwnedBy(rank).size();
hipStream_t streams[n_streams];
for (int i = 0; i < n_streams; i++) {
CUDA_CHECK(hipStreamCreate(&streams[i]));
}
fit_impl(handle,
input_data,
input_desc,
labels,
coef,
intercept,
fit_intercept,
normalize,
algo,
streams,
n_streams,
verbose);
for (int i = 0; i < n_streams; i++) {
CUDA_CHECK(hipStreamSynchronize(streams[i]));
}
for (int i = 0; i < n_streams; i++) {
CUDA_CHECK(hipStreamDestroy(streams[i]));
}
}
template <typename T>
void predict_impl(raft::handle_t& handle,
std::vector<Matrix::Data<T>*>& input_data,
Matrix::PartDescriptor& input_desc,
T* coef,
T intercept,
std::vector<Matrix::Data<T>*>& preds,
hipStream_t* streams,
int n_streams,
bool verbose)
{
std::vector<Matrix::RankSizePair*> local_blocks = input_desc.partsToRanks;
T alpha = T(1);
T beta = T(0);
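  // each partition computes preds[i] = input_data[i] * coef (a rows_i x N by N x 1 GEMM);
  // the scalar intercept is then added element-wise below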
for (std::size_t i = 0; i < input_data.size(); i++) {
int si = i % n_streams;
raft::linalg::gemm(handle,
input_data[i]->ptr,
local_blocks[i]->size,
input_desc.N,
coef,
preds[i]->ptr,
local_blocks[i]->size,
size_t(1),
HIPBLAS_OP_N,
HIPBLAS_OP_N,
alpha,
beta,
streams[si]);
raft::linalg::addScalar(
preds[i]->ptr, preds[i]->ptr, intercept, local_blocks[i]->size, streams[si]);
}
}
template <typename T>
void predict_impl(raft::handle_t& handle,
Matrix::RankSizePair** rank_sizes,
size_t n_parts,
Matrix::Data<T>** input,
size_t n_rows,
size_t n_cols,
T* coef,
T intercept,
Matrix::Data<T>** preds,
bool verbose)
{
int rank = handle.get_comms().get_rank();
std::vector<Matrix::RankSizePair*> ranksAndSizes(rank_sizes, rank_sizes + n_parts);
std::vector<Matrix::Data<T>*> input_data(input, input + n_parts);
Matrix::PartDescriptor input_desc(n_rows, n_cols, ranksAndSizes, rank);
std::vector<Matrix::Data<T>*> preds_data(preds, preds + n_parts);
// TODO: These streams should come from raft::handle_t
int n_streams = n_parts;
hipStream_t streams[n_streams];
for (int i = 0; i < n_streams; i++) {
CUDA_CHECK(hipStreamCreate(&streams[i]));
}
predict_impl(
handle, input_data, input_desc, coef, intercept, preds_data, streams, n_streams, verbose);
for (int i = 0; i < n_streams; i++) {
CUDA_CHECK(hipStreamSynchronize(streams[i]));
}
for (int i = 0; i < n_streams; i++) {
CUDA_CHECK(hipStreamDestroy(streams[i]));
}
}
void fit(raft::handle_t& handle,
std::vector<Matrix::Data<float>*>& input_data,
Matrix::PartDescriptor& input_desc,
std::vector<Matrix::Data<float>*>& labels,
float* coef,
float* intercept,
bool fit_intercept,
bool normalize,
int algo,
bool verbose)
{
fit_impl(handle,
input_data,
input_desc,
labels,
coef,
intercept,
fit_intercept,
normalize,
algo,
verbose);
}
void fit(raft::handle_t& handle,
std::vector<Matrix::Data<double>*>& input_data,
Matrix::PartDescriptor& input_desc,
std::vector<Matrix::Data<double>*>& labels,
double* coef,
double* intercept,
bool fit_intercept,
bool normalize,
int algo,
bool verbose)
{
fit_impl(handle,
input_data,
input_desc,
labels,
coef,
intercept,
fit_intercept,
normalize,
algo,
verbose);
}
void predict(raft::handle_t& handle,
Matrix::RankSizePair** rank_sizes,
size_t n_parts,
Matrix::Data<float>** input,
size_t n_rows,
size_t n_cols,
float* coef,
float intercept,
Matrix::Data<float>** preds,
bool verbose)
{
predict_impl(handle, rank_sizes, n_parts, input, n_rows, n_cols, coef, intercept, preds, verbose);
}
void predict(raft::handle_t& handle,
Matrix::RankSizePair** rank_sizes,
size_t n_parts,
Matrix::Data<double>** input,
size_t n_rows,
size_t n_cols,
double* coef,
double intercept,
Matrix::Data<double>** preds,
bool verbose)
{
predict_impl(handle, rank_sizes, n_parts, input, n_rows, n_cols, coef, intercept, preds, verbose);
}
} // namespace opg
} // namespace OLS
} // namespace ML
| 9fee91a5b0b67116366b0737f5ac103563a7e5f6.cu | /*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuml/linear_model/ols_mg.hpp>
#include <cuml/linear_model/preprocess_mg.hpp>
#include <opg/linalg/lstsq.hpp>
#include <opg/stats/mean.hpp>
#include <raft/comms/comms.hpp>
#include <raft/cuda_utils.cuh>
#include <raft/linalg/add.cuh>
#include <raft/linalg/gemm.cuh>
#include <raft/matrix/math.cuh>
#include <raft/matrix/matrix.cuh>
#include <raft/mr/device/allocator.hpp>
#include <raft/mr/host/allocator.hpp>
#include <rmm/device_uvector.hpp>
#include <cstddef>
using namespace MLCommon;
namespace ML {
namespace OLS {
namespace opg {
template <typename T>
void fit_impl(raft::handle_t& handle,
std::vector<Matrix::Data<T>*>& input_data,
Matrix::PartDescriptor& input_desc,
std::vector<Matrix::Data<T>*>& labels,
T* coef,
T* intercept,
bool fit_intercept,
bool normalize,
int algo,
cudaStream_t* streams,
int n_streams,
bool verbose)
{
rmm::device_uvector<T> mu_input(0, streams[0]);
rmm::device_uvector<T> norm2_input(0, streams[0]);
rmm::device_uvector<T> mu_labels(0, streams[0]);
if (fit_intercept) {
mu_input.resize(input_desc.N, streams[0]);
mu_labels.resize(1, streams[0]);
if (normalize) { norm2_input.resize(input_desc.N, streams[0]); }
GLM::opg::preProcessData(handle,
input_data,
input_desc,
labels,
mu_input.data(),
mu_labels.data(),
norm2_input.data(),
fit_intercept,
normalize,
streams,
n_streams,
verbose);
}
if (algo == 0 || input_desc.N == 1) {
ASSERT(false, "olsFit: no algorithm with this id has been implemented");
} else if (algo == 1) {
LinAlg::opg::lstsqEig(handle, input_data, input_desc, labels, coef, streams, n_streams);
} else {
ASSERT(false, "olsFit: no algorithm with this id has been implemented");
}
if (fit_intercept) {
GLM::opg::postProcessData(handle,
input_data,
input_desc,
labels,
coef,
intercept,
mu_input.data(),
mu_labels.data(),
norm2_input.data(),
fit_intercept,
normalize,
streams,
n_streams,
verbose);
} else {
*intercept = T(0);
}
}
/**
* @brief performs MNMG fit operation for the ols
* @input param handle: the internal cuml handle object
* @input param rank_sizes: includes all the partition size information for the rank
* @input param n_parts: number of partitions
* @input param input: input data
* @input param labels: labels data
* @output param coef: learned regression coefficients
* @output param intercept: intercept value
* @input param fit_intercept: fit intercept or not
* @input param normalize: normalize the data or not
* @input param verbose
*/
template <typename T>
void fit_impl(raft::handle_t& handle,
std::vector<Matrix::Data<T>*>& input_data,
Matrix::PartDescriptor& input_desc,
std::vector<Matrix::Data<T>*>& labels,
T* coef,
T* intercept,
bool fit_intercept,
bool normalize,
int algo,
bool verbose)
{
int rank = handle.get_comms().get_rank();
// TODO: These streams should come from raft::handle_t
int n_streams = input_desc.blocksOwnedBy(rank).size();
cudaStream_t streams[n_streams];
for (int i = 0; i < n_streams; i++) {
CUDA_CHECK(cudaStreamCreate(&streams[i]));
}
fit_impl(handle,
input_data,
input_desc,
labels,
coef,
intercept,
fit_intercept,
normalize,
algo,
streams,
n_streams,
verbose);
for (int i = 0; i < n_streams; i++) {
CUDA_CHECK(cudaStreamSynchronize(streams[i]));
}
for (int i = 0; i < n_streams; i++) {
CUDA_CHECK(cudaStreamDestroy(streams[i]));
}
}
template <typename T>
void predict_impl(raft::handle_t& handle,
std::vector<Matrix::Data<T>*>& input_data,
Matrix::PartDescriptor& input_desc,
T* coef,
T intercept,
std::vector<Matrix::Data<T>*>& preds,
cudaStream_t* streams,
int n_streams,
bool verbose)
{
std::vector<Matrix::RankSizePair*> local_blocks = input_desc.partsToRanks;
T alpha = T(1);
T beta = T(0);
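// Each local partition i computes preds[i] = input[i] * coef as a
// (local rows x N) x (N x 1) GEMM, then adds the scalar intercept below.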
for (std::size_t i = 0; i < input_data.size(); i++) {
int si = i % n_streams;
raft::linalg::gemm(handle,
input_data[i]->ptr,
local_blocks[i]->size,
input_desc.N,
coef,
preds[i]->ptr,
local_blocks[i]->size,
size_t(1),
CUBLAS_OP_N,
CUBLAS_OP_N,
alpha,
beta,
streams[si]);
raft::linalg::addScalar(
preds[i]->ptr, preds[i]->ptr, intercept, local_blocks[i]->size, streams[si]);
}
}
template <typename T>
void predict_impl(raft::handle_t& handle,
Matrix::RankSizePair** rank_sizes,
size_t n_parts,
Matrix::Data<T>** input,
size_t n_rows,
size_t n_cols,
T* coef,
T intercept,
Matrix::Data<T>** preds,
bool verbose)
{
int rank = handle.get_comms().get_rank();
std::vector<Matrix::RankSizePair*> ranksAndSizes(rank_sizes, rank_sizes + n_parts);
std::vector<Matrix::Data<T>*> input_data(input, input + n_parts);
Matrix::PartDescriptor input_desc(n_rows, n_cols, ranksAndSizes, rank);
std::vector<Matrix::Data<T>*> preds_data(preds, preds + n_parts);
// TODO: These streams should come from raft::handle_t
int n_streams = n_parts;
cudaStream_t streams[n_streams];
for (int i = 0; i < n_streams; i++) {
CUDA_CHECK(cudaStreamCreate(&streams[i]));
}
predict_impl(
handle, input_data, input_desc, coef, intercept, preds_data, streams, n_streams, verbose);
for (int i = 0; i < n_streams; i++) {
CUDA_CHECK(cudaStreamSynchronize(streams[i]));
}
for (int i = 0; i < n_streams; i++) {
CUDA_CHECK(cudaStreamDestroy(streams[i]));
}
}
void fit(raft::handle_t& handle,
std::vector<Matrix::Data<float>*>& input_data,
Matrix::PartDescriptor& input_desc,
std::vector<Matrix::Data<float>*>& labels,
float* coef,
float* intercept,
bool fit_intercept,
bool normalize,
int algo,
bool verbose)
{
fit_impl(handle,
input_data,
input_desc,
labels,
coef,
intercept,
fit_intercept,
normalize,
algo,
verbose);
}
void fit(raft::handle_t& handle,
std::vector<Matrix::Data<double>*>& input_data,
Matrix::PartDescriptor& input_desc,
std::vector<Matrix::Data<double>*>& labels,
double* coef,
double* intercept,
bool fit_intercept,
bool normalize,
int algo,
bool verbose)
{
fit_impl(handle,
input_data,
input_desc,
labels,
coef,
intercept,
fit_intercept,
normalize,
algo,
verbose);
}
void predict(raft::handle_t& handle,
Matrix::RankSizePair** rank_sizes,
size_t n_parts,
Matrix::Data<float>** input,
size_t n_rows,
size_t n_cols,
float* coef,
float intercept,
Matrix::Data<float>** preds,
bool verbose)
{
predict_impl(handle, rank_sizes, n_parts, input, n_rows, n_cols, coef, intercept, preds, verbose);
}
void predict(raft::handle_t& handle,
Matrix::RankSizePair** rank_sizes,
size_t n_parts,
Matrix::Data<double>** input,
size_t n_rows,
size_t n_cols,
double* coef,
double intercept,
Matrix::Data<double>** preds,
bool verbose)
{
predict_impl(handle, rank_sizes, n_parts, input, n_rows, n_cols, coef, intercept, preds, verbose);
}
} // namespace opg
} // namespace OLS
} // namespace ML
|
35c9aa202a6bb59061a09c86460450512649e9cf.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright 2022-2023 by XGBoost Contributors
*/
#include <thrust/iterator/counting_iterator.h> // thrust::make_counting_iterator
#include <cstddef> // size_t
#include "cuda_context.cuh" // CUDAContext
#include "device_helpers_hip.cuh" // dh::MakeTransformIterator, tcbegin, tcend
#include "optional_weight.h" // common::OptionalWeights
#include "stats.cuh" // common::SegmentedQuantile, common::SegmentedWeightedQuantile
#include "xgboost/base.h" // XGBOOST_DEVICE
#include "xgboost/context.h" // Context
#include "xgboost/host_device_vector.h" // HostDeviceVector
#include "xgboost/linalg.h" // linalg::TensorView, UnravelIndex, Apply
namespace xgboost {
namespace common {
namespace cuda_impl {
void Median(Context const* ctx, linalg::TensorView<float const, 2> t,
common::OptionalWeights weights, linalg::Tensor<float, 1>* out) {
CHECK_GE(t.Shape(1), 1);
HostDeviceVector<std::size_t> segments(t.Shape(1) + 1, 0);
segments.SetDevice(ctx->gpu_id);
auto d_segments = segments.DeviceSpan();
dh::LaunchN(d_segments.size(), ctx->CUDACtx()->Stream(),
[=] XGBOOST_DEVICE(std::size_t i) { d_segments[i] = t.Shape(0) * i; });
auto val_it = dh::MakeTransformIterator<float>(
thrust::make_counting_iterator(0ul), [=] XGBOOST_DEVICE(size_t i) {
return linalg::detail::Apply(t, linalg::UnravelIndex(i, t.Shape()));
});
out->SetDevice(ctx->gpu_id);
out->Reshape(t.Shape(1));
if (weights.Empty()) {
common::SegmentedQuantile(ctx, 0.5, dh::tcbegin(d_segments), dh::tcend(d_segments), val_it,
val_it + t.Size(), out->Data());
} else {
CHECK_NE(t.Shape(1), 0);
auto w_it = dh::MakeTransformIterator<float>(thrust::make_counting_iterator(0ul),
[=] XGBOOST_DEVICE(std::size_t i) {
auto sample_idx = i / t.Shape(1);
return weights[sample_idx];
});
common::SegmentedWeightedQuantile(ctx, 0.5, dh::tcbegin(d_segments), dh::tcend(d_segments),
val_it, val_it + t.Size(), w_it, w_it + t.Size(),
out->Data());
}
}
void Mean(Context const* ctx, linalg::VectorView<float const> v, linalg::VectorView<float> out) {
float n = v.Size();
auto it = dh::MakeTransformIterator<float>(
thrust::make_counting_iterator(0ul), [=] XGBOOST_DEVICE(std::size_t i) { return v(i) / n; });
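// hipcub::DeviceReduce follows CUB's two-phase pattern: the first call below is
// made with a null temp-storage pointer and only writes the required scratch
// size into `bytes`; the second call performs the actual sum into `out`.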
std::size_t bytes;
CHECK_EQ(out.Size(), 1);
auto s = ctx->CUDACtx()->Stream();
hipcub::DeviceReduce::Sum(nullptr, bytes, it, out.Values().data(), v.Size(), s);
dh::TemporaryArray<char> temp{bytes};
hipcub::DeviceReduce::Sum(temp.data().get(), bytes, it, out.Values().data(), v.Size(), s);
}
} // namespace cuda_impl
} // namespace common
} // namespace xgboost
| 35c9aa202a6bb59061a09c86460450512649e9cf.cu | /**
* Copyright 2022-2023 by XGBoost Contributors
*/
#include <thrust/iterator/counting_iterator.h> // thrust::make_counting_iterator
#include <cstddef> // size_t
#include "cuda_context.cuh" // CUDAContext
#include "device_helpers.cuh" // dh::MakeTransformIterator, tcbegin, tcend
#include "optional_weight.h" // common::OptionalWeights
#include "stats.cuh" // common::SegmentedQuantile, common::SegmentedWeightedQuantile
#include "xgboost/base.h" // XGBOOST_DEVICE
#include "xgboost/context.h" // Context
#include "xgboost/host_device_vector.h" // HostDeviceVector
#include "xgboost/linalg.h" // linalg::TensorView, UnravelIndex, Apply
namespace xgboost {
namespace common {
namespace cuda_impl {
void Median(Context const* ctx, linalg::TensorView<float const, 2> t,
common::OptionalWeights weights, linalg::Tensor<float, 1>* out) {
CHECK_GE(t.Shape(1), 1);
HostDeviceVector<std::size_t> segments(t.Shape(1) + 1, 0);
segments.SetDevice(ctx->gpu_id);
auto d_segments = segments.DeviceSpan();
dh::LaunchN(d_segments.size(), ctx->CUDACtx()->Stream(),
[=] XGBOOST_DEVICE(std::size_t i) { d_segments[i] = t.Shape(0) * i; });
auto val_it = dh::MakeTransformIterator<float>(
thrust::make_counting_iterator(0ul), [=] XGBOOST_DEVICE(size_t i) {
return linalg::detail::Apply(t, linalg::UnravelIndex(i, t.Shape()));
});
out->SetDevice(ctx->gpu_id);
out->Reshape(t.Shape(1));
if (weights.Empty()) {
common::SegmentedQuantile(ctx, 0.5, dh::tcbegin(d_segments), dh::tcend(d_segments), val_it,
val_it + t.Size(), out->Data());
} else {
CHECK_NE(t.Shape(1), 0);
auto w_it = dh::MakeTransformIterator<float>(thrust::make_counting_iterator(0ul),
[=] XGBOOST_DEVICE(std::size_t i) {
auto sample_idx = i / t.Shape(1);
return weights[sample_idx];
});
common::SegmentedWeightedQuantile(ctx, 0.5, dh::tcbegin(d_segments), dh::tcend(d_segments),
val_it, val_it + t.Size(), w_it, w_it + t.Size(),
out->Data());
}
}
void Mean(Context const* ctx, linalg::VectorView<float const> v, linalg::VectorView<float> out) {
float n = v.Size();
auto it = dh::MakeTransformIterator<float>(
thrust::make_counting_iterator(0ul), [=] XGBOOST_DEVICE(std::size_t i) { return v(i) / n; });
std::size_t bytes;
CHECK_EQ(out.Size(), 1);
auto s = ctx->CUDACtx()->Stream();
cub::DeviceReduce::Sum(nullptr, bytes, it, out.Values().data(), v.Size(), s);
dh::TemporaryArray<char> temp{bytes};
cub::DeviceReduce::Sum(temp.data().get(), bytes, it, out.Values().data(), v.Size(), s);
}
} // namespace cuda_impl
} // namespace common
} // namespace xgboost
|
141b021952c9da97705a6ae83367dd8bee4ac5c0.hip | // !!! This is a file automatically generated by hipify!!!
#include <algorithm>
#include <atomic>
#include <cstdlib>
#include <string>
#include <unordered_map>
#include "hipcub/hipcub.hpp"
#ifdef CAFFE2_USE_CNMEM
#include "cnmem.h"
#endif // CAFFE2_USE_CNMEM
#include "caffe2/core/asan.h"
#include "caffe2/core/common_cudnn.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/core/init.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/tensor.h"
#include "caffe2/utils/string_utils.h"
#define CNMEM_CHECK(condition) \
do { \
cnmemStatus_t error = condition; \
CAFFE_ENFORCE_EQ(error, CNMEM_STATUS_SUCCESS, cnmemGetErrorString(error)); \
} while (0)
CAFFE2_DEFINE_string(caffe2_cuda_memory_pool, "",
"Sets the memory pool used by caffe2. Possible values are "
"none, cnmen and cub.");
CAFFE2_DEFINE_double(caffe2_cnmem_reserve, 0.8,
"Sets the proportion of memory pre-allocated by the memory "
"pool if you use cnmem.");
CAFFE2_DEFINE_string(caffe2_cnmem_gpus, "",
"A comma separated list containing the index of gpus that "
"we will set the memory pool on. If not set, we will set "
"up the memory pool on all available GPUs. This only applies "
"to cnmem.");
// TODO(jiayq): Figure out the best default values for the params below.
// Currently we are using the setting copied from caffe.
CAFFE2_DEFINE_int(caffe2_cub_bin_growth, 2,
"If using cub as the memory allocator, sets the growth of bins "
"used by the cub pool.");
CAFFE2_DEFINE_int(caffe2_cub_min_bin, 6,
"If using cub as the memory allocator, sets the min number of "
"bins.");
CAFFE2_DEFINE_int(caffe2_cub_max_bin, 16,
"If using cub as the memory allocator, sets the max number of "
"bins.");
namespace caffe2 {
CAFFE_KNOWN_TYPE(Tensor<CUDAContext>);
thread_local ThreadLocalCUDAObjects CUDAContext::cuda_objects_;
// TODO(jiayq): these variables shouldn't be currently accessed during static
// initialization. We should consider moving them to a Mayer's singleton to
// be totally safe against SIOF.
// Static global variables for setting up the memory pool.
CudaMemoryPoolType g_cuda_memory_pool_type;
#ifdef CAFFE2_USE_CNMEM
// For cnmem allocator
vector<bool> g_cnmem_available_for_device;
#endif // CAFFE2_USE_CNMEM
// For cub allocator
unique_ptr<hipcub::CachingDeviceAllocator> g_cub_allocator;
// an unordered map that holds the map from the cuda memory pointer to the
// device id that it is allocated from. This is used in the cuda memory pool
// cases, where we need the device id to carry out the deletion.
// Note(jiayq): an alternate approach is to use cudaGetPointerAttributes, but
// that is usually quite slow. We might want to benchmark the speed difference
// though.
// Note(jiayq): another alternate approach is to augment the Tensor class that
// would allow one to record the device id. However, this does not address any
// non-tensor allocation and deallocation.
// Ideally, a memory pool should already have the device id information, as
// long as we are using UVA (as of CUDA 5 and later) so the addresses are
// unique.
static std::unordered_map<void*, uint8_t> g_cuda_device_affiliation;
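// For illustration of the slower pointer-attribute alternative mentioned above,
// a lookup could look roughly like this sketch (compiled out; the helper name
// is a placeholder and error handling is minimal):
#if 0
static int DeviceOfPointer(const void* ptr) {
  hipPointerAttribute_t attr;
  CUDA_ENFORCE(hipPointerGetAttributes(&attr, ptr));
  return attr.device;  // device that owns this allocation (relies on UVA)
}
#endif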
CudaMemoryPoolType GetCudaMemoryPoolType() {
return g_cuda_memory_pool_type;
}
///////////////////////////////////////////////////////////////////////////////
// A wrapper to allow us to lazily initialize all cuda environments that Caffe
// uses. This gets done the first time a caffe2::CUDAContext::New() gets called
// which is probably the decisive indication that this caffe2 run is going to
// use GPUs. We avoid cuda initialization with core/init.h functionalities so
// that we have minimal resource impact in case we will need to run multiple
// caffe2 instances on a GPU machine.
///////////////////////////////////////////////////////////////////////////////
static void Caffe2InitializeCuda() {
// If the current run does not have any cuda devices, do nothing.
if (!HasCudaGPU()) {
VLOG(1) << "No cuda gpu present. Skipping.";
return;
}
// Check if the number of GPUs matches the expected compile-time max number
// of GPUs.
CAFFE_ENFORCE_LE(
NumCudaDevices(),
CAFFE2_COMPILE_TIME_MAX_GPUS,
"Number of CUDA devices on the machine is larger than the compiled "
"max number of gpus expected (",
CAFFE2_COMPILE_TIME_MAX_GPUS,
"). Increase that and recompile the caffe binary.");
// Save the current device so we can restore it after moving across
// different devices.
int init_device;
CUDA_ENFORCE(hipGetDevice(&init_device));
for (int i = 0; i < NumCudaDevices(); ++i) {
auto err = hipSetDevice(i);
if (err != hipSuccess) {
LOG(WARNING)
<< "Cannot use device " << i
<< "due to the following error: " << hipGetErrorString(err);
continue;
}
// Enable peer access.
for (int j = 0; j < NumCudaDevices(); ++j) {
if (i == j) continue;
int can_access;
CUDA_ENFORCE(hipDeviceCanAccessPeer(&can_access, i, j));
if (can_access) {
VLOG(1) << "Enabling peer access from " << i << " to " << j;
// Note: just for future reference, the 0 here is not a gpu id, it is
// a reserved flag for hipDeviceEnablePeerAccess that should always be
// zero currently.
CUDA_ENFORCE(hipDeviceEnablePeerAccess(j, 0));
}
}
}
// Restore the current device.
CUDA_ENFORCE(hipSetDevice(init_device));
RegisterShapeCallFunction(
TypeMeta::Id<Tensor<CUDAContext>>(),
GetTensorShape<CUDAContext>
);
// Check the versions of cuDNN that were compiled and linked with are compatible
CheckCuDNNVersions();
}
#ifdef CAFFE2_USE_CNMEM
static void SetUpCNMEM() {
g_cnmem_available_for_device.assign(NumCudaDevices(), false);
VLOG(1) << "Setting up cnmem memory pool.";
vector<int> device_ids;
// If the cnmem gpus are not set, set up all gpus.
if (FLAGS_caffe2_cnmem_gpus.size() == 0) {
device_ids.resize(NumCudaDevices());
for (int i = 0; i < device_ids.size(); ++i) {
device_ids[i] = i;
}
} else {
vector<string> device_ids_str = split(',', FLAGS_caffe2_cnmem_gpus);
for (const string& id_str : device_ids_str) {
int id = 0;
try {
id = std::stoi(id_str);
} catch (...) {
CAFFE_THROW(
"Cannot parse device id ",
id_str,
" to a valid int number.");
}
device_ids.push_back(id);
}
}
CAFFE_ENFORCE(FLAGS_caffe2_cnmem_reserve >= 0 &&
FLAGS_caffe2_cnmem_reserve < 1.0,
"caffe2_cnmem_reserve number must be in [0, 1)");
vector<cnmemDevice_t> cnmem_devs(device_ids.size());
for (int i = 0; i < device_ids.size(); ++i) {
const int id = device_ids[i];
CAFFE_ENFORCE(
id >= 0 && id < NumCudaDevices(),
"GPU id ", id, " out of the range of available GPUs.");
DeviceGuard guard(id);
size_t free, used;
CUDA_ENFORCE(hipMemGetInfo(&free, &used));
VLOG(1) << "Reserving " << FLAGS_caffe2_cnmem_reserve * 100
<< " percent of the free memory (total " << free
<< ") on device " << id;
// Note: we create a dummy non-null stream for memory allocations, so that
// any malloc can be called from any cuda stream, since caffe2 uses a lot of
// non-default streams for computation. We will allocate all the reserved
// memory to that non-null stream.
cnmem_devs[i].device = id;
cnmem_devs[i].size = size_t(FLAGS_caffe2_cnmem_reserve * free);
cnmem_devs[i].numStreams = 0;
cnmem_devs[i].streamSizes = nullptr;
g_cnmem_available_for_device[id] = true;
}
CNMEM_CHECK(
cnmemInit(cnmem_devs.size(), cnmem_devs.data(), CNMEM_FLAGS_DEFAULT));
VLOG(1) << "Done setting up cnmem memory pool.";
}
#endif // CAFFE2_USE_CNMEM
static void SetUpCub() {
VLOG(1) << "Setting up cub memory pool.";
const bool k_cub_debug =
#ifdef NDEBUG
false;
#else
true;
#endif
// Sets up the cub memory pool
try {
g_cub_allocator.reset(new hipcub::CachingDeviceAllocator(
FLAGS_caffe2_cub_bin_growth,
FLAGS_caffe2_cub_min_bin,
FLAGS_caffe2_cub_max_bin,
static_cast<size_t>(-1),
false,
k_cub_debug));
} catch (...) {
CAFFE_THROW("Some error happened at cub initialization.");
}
VLOG(1) << "Done setting up cub memory pool.";
}
static void Caffe2SetCUDAMemoryPool() {
if (FLAGS_caffe2_cuda_memory_pool == "" ||
FLAGS_caffe2_cuda_memory_pool == "none") {
g_cuda_memory_pool_type = CudaMemoryPoolType::NONE;
} else if (FLAGS_caffe2_cuda_memory_pool == "cnmem") {
#ifdef CAFFE2_USE_CNMEM
// sets up cnmem.
g_cuda_memory_pool_type = CudaMemoryPoolType::CNMEM;
SetUpCNMEM();
#else
CAFFE_THROW("This caffe2 is not built with cnmem support, so you should "
"not use the cnmem memory pool type.");
#endif // CAFFE2_USE_CNMEM
} else if (FLAGS_caffe2_cuda_memory_pool == "cub") {
// Sets up cub.
g_cuda_memory_pool_type = CudaMemoryPoolType::CUB;
SetUpCub();
} else {
CAFFE_THROW("Unrecognized cuda memory pool type: ",
FLAGS_caffe2_cuda_memory_pool);
}
}
// An initialization function that sets the CPU side to use pinned cpu
// allocator.
void Caffe2UsePinnedCPUAllocator() {
#if CAFFE2_ASAN_ENABLED
// Note(jiayq): for more details, see
// https://github.com/google/sanitizers/issues/629
LOG(WARNING) << "There are known issues between address sanitizer and "
"hipHostMalloc. As a result, caffe2 will not enable pinned "
"memory allocation in asan mode. If you are expecting any "
"behavior that depends on asan, be advised that it is not "
"turned on.";
#else
if (!HasCudaGPU()) {
VLOG(1) << "No GPU present. I won't use pinned allocator then.";
return;
}
VLOG(1) << "Caffe2 gpu: setting CPUAllocator to PinnedCPUAllocator.";
SetCPUAllocator(new PinnedCPUAllocator());
#endif
}
// Caffe2CudaInitializerHelper is a minimal struct whose sole purpose is to
// detect the first hint that this Caffe2 run is going to use GPU: either
// CUDAContext is initialized or CUDAContext::New is called. It then runs
// all the related cuda initialization functions.
namespace {
struct Caffe2CudaInitializerHelper {
Caffe2CudaInitializerHelper() {
// We cannot use bool because nvcc changes bool to __nv_bool which does
// not have a std::atomic instantiation.
static std::atomic<char> first_call(1);
if (first_call.fetch_and((char)0)) {
Caffe2InitializeCuda();
Caffe2SetCUDAMemoryPool();
Caffe2UsePinnedCPUAllocator();
}
}
};
} // namespace
CUDAContext::CUDAContext(const int gpu_id)
: gpu_id_(gpu_id == -1 ? GetDefaultGPUID() : gpu_id)
, random_seed_(math::randomNumberSeed()) {
static Caffe2CudaInitializerHelper g_cuda_initializer_;
}
CUDAContext::CUDAContext(const DeviceOption& option)
: gpu_id_(option.has_cuda_gpu_id() ?
option.cuda_gpu_id() : GetDefaultGPUID()),
random_seed_(option.has_random_seed() ?
option.random_seed() : math::randomNumberSeed()) {
static Caffe2CudaInitializerHelper g_cuda_initializer_;
DCHECK_EQ(option.device_type(), CUDA);
}
// shared mutex to lock out alloc / free during NCCL launches
std::mutex& CUDAContext::mutex() {
static std::mutex m;
return m;
}
void* CUDAContext::New(size_t nbytes) {
// Lock the mutex
std::lock_guard<std::mutex> lock(CUDAContext::mutex());
// A one-time caffe2 cuda initializer.
static Caffe2CudaInitializerHelper g_cuda_initializer_;
void* ptr = nullptr;
switch (g_cuda_memory_pool_type) {
case CudaMemoryPoolType::NONE:
CUDA_ENFORCE(hipMalloc(&ptr, nbytes));
return ptr;
case CudaMemoryPoolType::CNMEM: {
#ifdef CAFFE2_USE_CNMEM
auto gpuId = GetCurrentGPUID();
CAFFE_ENFORCE(
gpuId < g_cnmem_available_for_device.size() &&
g_cnmem_available_for_device[gpuId],
"Trying to allocate on device ",
gpuId,
" but cnmem pool is not set up for it.");
CNMEM_CHECK(cnmemMalloc(&ptr, nbytes, nullptr));
g_cuda_device_affiliation[ptr] = GetCurrentGPUID();
VLOG(2) << "CNMEM allocating pointer " << ptr << " on device "
<< GetCurrentGPUID();
return ptr;
#else
CAFFE_THROW("This caffe2 is not built with cnmem support, so you should "
"not use the cnmem memory pool type.");
#endif // CAFFE2_USE_CNMEM
}
case CudaMemoryPoolType::CUB:
CUDA_ENFORCE(g_cub_allocator->DeviceAllocate(&ptr, nbytes));
g_cuda_device_affiliation[ptr] = GetCurrentGPUID();
VLOG(2) << "CUB allocating pointer " << ptr << " on device "
<< GetCurrentGPUID();
return ptr;
}
return nullptr;
}
void CUDAContext::Delete(void* ptr) {
// lock the mutex
std::lock_guard<std::mutex> lock(CUDAContext::mutex());
switch (g_cuda_memory_pool_type) {
case CudaMemoryPoolType::NONE: {
// If memory pool is not set up, use simple hipFree.
hipError_t error = hipFree(ptr);
// For some reason, in Python runtime we sometimes delete a data pointer
// after the cuda runtime exits - this is odd but is probably caused by
// a static workspace that pycaffe2 uses, and the destruction got
// entangled in some race condition. Anyway, since cuda runtime is exiting
// anyway, we will not need to worry about memory leak, so we basically
// ignore it. This is definitely not ideal but works for now.
if (error != hipSuccess && error != hipErrorDeinitialized) {
LOG(FATAL) << "Error at: " << __FILE__ << ":" << __LINE__ << ": "
<< hipGetErrorString(error);
}
break; }
case CudaMemoryPoolType::CNMEM: {
#ifdef CAFFE2_USE_CNMEM
auto it = g_cuda_device_affiliation.find(ptr);
DCHECK(it != g_cuda_device_affiliation.end());
DeviceGuard guard(it->second);
VLOG(2) << "CNMEM freeing pointer " << ptr << " on device " << it->second;
CNMEM_CHECK(cnmemFree(ptr, nullptr));
g_cuda_device_affiliation.erase(it);
break;
#else
CAFFE_THROW("This caffe2 is not built with cnmem support, so you should "
"not use the cnmem memory pool type.");
#endif // CAFFE2_USE_CNMEM
}
case CudaMemoryPoolType::CUB: {
auto it = g_cuda_device_affiliation.find(ptr);
DCHECK(it != g_cuda_device_affiliation.end());
VLOG(2) << "CUB freeing pointer " << ptr << " on device " << it->second;
CUDA_ENFORCE(g_cub_allocator->DeviceFree(it->second, ptr));
g_cuda_device_affiliation.erase(it);
break;
}
}
}
} // namespace caffe2
| 141b021952c9da97705a6ae83367dd8bee4ac5c0.cu | #include <algorithm>
#include <atomic>
#include <cstdlib>
#include <string>
#include <unordered_map>
#include "cub/util_allocator.cuh"
#ifdef CAFFE2_USE_CNMEM
#include "cnmem.h"
#endif // CAFFE2_USE_CNMEM
#include "caffe2/core/asan.h"
#include "caffe2/core/common_cudnn.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/core/init.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/tensor.h"
#include "caffe2/utils/string_utils.h"
#define CNMEM_CHECK(condition) \
do { \
cnmemStatus_t error = condition; \
CAFFE_ENFORCE_EQ(error, CNMEM_STATUS_SUCCESS, cnmemGetErrorString(error)); \
} while (0)
CAFFE2_DEFINE_string(caffe2_cuda_memory_pool, "",
"Sets the memory pool used by caffe2. Possible values are "
"none, cnmen and cub.");
CAFFE2_DEFINE_double(caffe2_cnmem_reserve, 0.8,
"Sets the proportion of memory pre-allocated by the memory "
"pool if you use cnmem.");
CAFFE2_DEFINE_string(caffe2_cnmem_gpus, "",
"A comma separated list containing the index of gpus that "
"we will set the memory pool on. If not set, we will set "
"up the memory pool on all available GPUs. This only applies "
"to cnmem.");
// TODO(jiayq): Figure out the best default values for the params below.
// Currently we are using the setting copied from caffe.
CAFFE2_DEFINE_int(caffe2_cub_bin_growth, 2,
"If using cub as the memory allocator, sets the growth of bins "
"used by the cub pool.");
CAFFE2_DEFINE_int(caffe2_cub_min_bin, 6,
"If using cub as the memory allocator, sets the min number of "
"bins.");
CAFFE2_DEFINE_int(caffe2_cub_max_bin, 16,
"If using cub as the memory allocator, sets the max number of "
"bins.");
namespace caffe2 {
CAFFE_KNOWN_TYPE(Tensor<CUDAContext>);
thread_local ThreadLocalCUDAObjects CUDAContext::cuda_objects_;
// TODO(jiayq): these variables shouldn't be currently accessed during static
// initialization. We should consider moving them to a Mayer's singleton to
// be totally safe against SIOF.
// Static global variables for setting up the memory pool.
CudaMemoryPoolType g_cuda_memory_pool_type;
#ifdef CAFFE2_USE_CNMEM
// For cnmem allocator
vector<bool> g_cnmem_available_for_device;
#endif // CAFFE2_USE_CNMEM
// For cub allocator
unique_ptr<cub::CachingDeviceAllocator> g_cub_allocator;
// an unordered map that holds the map from the cuda memory pointer to the
// device id that it is allocated from. This is used in the cuda memory pool
// cases, where we need the device id to carry out the deletion.
// Note(jiayq): an alternate approach is to use cudaGetPointerAttributes, but
// that is usually quite slow. We might want to benchmark the speed difference
// though.
// Note(jiayq): another alternate approach is to augment the Tensor class that
// would allow one to record the device id. However, this does not address any
// non-tensor allocation and deallocation.
// Ideally, a memory pool should already have the device id information, as
// long as we are using UVA (as of CUDA 5 and later) so the addresses are
// unique.
static std::unordered_map<void*, uint8_t> g_cuda_device_affiliation;
CudaMemoryPoolType GetCudaMemoryPoolType() {
return g_cuda_memory_pool_type;
}
///////////////////////////////////////////////////////////////////////////////
// A wrapper to allow us to lazily initialize all cuda environments that Caffe
// uses. This gets done the first time a caffe2::CUDAContext::New() gets called
// which is probably the decisive indication that this caffe2 run is going to
// use GPUs. We avoid cuda initialization with core/init.h functionalities so
// that we have minimal resource impact in case we will need to run multiple
// caffe2 instances on a GPU machine.
///////////////////////////////////////////////////////////////////////////////
static void Caffe2InitializeCuda() {
// If the current run does not have any cuda devices, do nothing.
if (!HasCudaGPU()) {
VLOG(1) << "No cuda gpu present. Skipping.";
return;
}
// Check if the number of GPUs matches the expected compile-time max number
// of GPUs.
CAFFE_ENFORCE_LE(
NumCudaDevices(),
CAFFE2_COMPILE_TIME_MAX_GPUS,
"Number of CUDA devices on the machine is larger than the compiled "
"max number of gpus expected (",
CAFFE2_COMPILE_TIME_MAX_GPUS,
"). Increase that and recompile the caffe binary.");
// Save the current device so we can restore it after moving across
// different devices.
int init_device;
CUDA_ENFORCE(cudaGetDevice(&init_device));
for (int i = 0; i < NumCudaDevices(); ++i) {
auto err = cudaSetDevice(i);
if (err != cudaSuccess) {
LOG(WARNING)
<< "Cannot use device " << i
<< "due to the following error: " << cudaGetErrorString(err);
continue;
}
// Enable peer access.
for (int j = 0; j < NumCudaDevices(); ++j) {
if (i == j) continue;
int can_access;
CUDA_ENFORCE(cudaDeviceCanAccessPeer(&can_access, i, j));
if (can_access) {
VLOG(1) << "Enabling peer access from " << i << " to " << j;
// Note: just for future reference, the 0 here is not a gpu id, it is
// a reserved flag for cudaDeviceEnablePeerAccess that should always be
// zero currently.
CUDA_ENFORCE(cudaDeviceEnablePeerAccess(j, 0));
}
}
}
// Restore the current device.
CUDA_ENFORCE(cudaSetDevice(init_device));
RegisterShapeCallFunction(
TypeMeta::Id<Tensor<CUDAContext>>(),
GetTensorShape<CUDAContext>
);
// Check the versions of cuDNN that were compiled and linked with are compatible
CheckCuDNNVersions();
}
#ifdef CAFFE2_USE_CNMEM
static void SetUpCNMEM() {
g_cnmem_available_for_device.assign(NumCudaDevices(), false);
VLOG(1) << "Setting up cnmem memory pool.";
vector<int> device_ids;
// If the cnmem gpus are not set, set up all gpus.
if (FLAGS_caffe2_cnmem_gpus.size() == 0) {
device_ids.resize(NumCudaDevices());
for (int i = 0; i < device_ids.size(); ++i) {
device_ids[i] = i;
}
} else {
vector<string> device_ids_str = split(',', FLAGS_caffe2_cnmem_gpus);
for (const string& id_str : device_ids_str) {
int id = 0;
try {
id = std::stoi(id_str);
} catch (...) {
CAFFE_THROW(
"Cannot parse device id ",
id_str,
" to a valid int number.");
}
device_ids.push_back(id);
}
}
CAFFE_ENFORCE(FLAGS_caffe2_cnmem_reserve >= 0 &&
FLAGS_caffe2_cnmem_reserve < 1.0,
"caffe2_cnmem_reserve number must be in [0, 1)");
vector<cnmemDevice_t> cnmem_devs(device_ids.size());
for (int i = 0; i < device_ids.size(); ++i) {
const int id = device_ids[i];
CAFFE_ENFORCE(
id >= 0 && id < NumCudaDevices(),
"GPU id ", id, " out of the range of available GPUs.");
DeviceGuard guard(id);
size_t free, used;
CUDA_ENFORCE(cudaMemGetInfo(&free, &used));
VLOG(1) << "Reserving " << FLAGS_caffe2_cnmem_reserve * 100
<< " percent of the free memory (total " << free
<< ") on device " << id;
// Note: we create a dummy non-null stream for memory allocations, so that
// any malloc can be called from any cuda stream, since caffe2 uses a lot of
// non-default streams for computation. We will allocate all the reserved
// memory to that non-null stream.
cnmem_devs[i].device = id;
cnmem_devs[i].size = size_t(FLAGS_caffe2_cnmem_reserve * free);
cnmem_devs[i].numStreams = 0;
cnmem_devs[i].streamSizes = nullptr;
g_cnmem_available_for_device[id] = true;
}
CNMEM_CHECK(
cnmemInit(cnmem_devs.size(), cnmem_devs.data(), CNMEM_FLAGS_DEFAULT));
VLOG(1) << "Done setting up cnmem memory pool.";
}
#endif // CAFFE2_USE_CNMEM
static void SetUpCub() {
VLOG(1) << "Setting up cub memory pool.";
const bool k_cub_debug =
#ifdef NDEBUG
false;
#else
true;
#endif
// Sets up the cub memory pool
try {
g_cub_allocator.reset(new cub::CachingDeviceAllocator(
FLAGS_caffe2_cub_bin_growth,
FLAGS_caffe2_cub_min_bin,
FLAGS_caffe2_cub_max_bin,
static_cast<size_t>(-1),
false,
k_cub_debug));
} catch (...) {
CAFFE_THROW("Some error happened at cub initialization.");
}
VLOG(1) << "Done setting up cub memory pool.";
}
static void Caffe2SetCUDAMemoryPool() {
if (FLAGS_caffe2_cuda_memory_pool == "" ||
FLAGS_caffe2_cuda_memory_pool == "none") {
g_cuda_memory_pool_type = CudaMemoryPoolType::NONE;
} else if (FLAGS_caffe2_cuda_memory_pool == "cnmem") {
#ifdef CAFFE2_USE_CNMEM
// sets up cnmem.
g_cuda_memory_pool_type = CudaMemoryPoolType::CNMEM;
SetUpCNMEM();
#else
CAFFE_THROW("This caffe2 is not built with cnmem support, so you should "
"not use the cnmem memory pool type.");
#endif // CAFFE2_USE_CNMEM
} else if (FLAGS_caffe2_cuda_memory_pool == "cub") {
// Sets up cub.
g_cuda_memory_pool_type = CudaMemoryPoolType::CUB;
SetUpCub();
} else {
CAFFE_THROW("Unrecognized cuda memory pool type: ",
FLAGS_caffe2_cuda_memory_pool);
}
}
// An initialization function that sets the CPU side to use pinned cpu
// allocator.
void Caffe2UsePinnedCPUAllocator() {
#if CAFFE2_ASAN_ENABLED
// Note(jiayq): for more details, see
// https://github.com/google/sanitizers/issues/629
LOG(WARNING) << "There are known issues between address sanitizer and "
"cudaMallocHost. As a result, caffe2 will not enable pinned "
"memory allocation in asan mode. If you are expecting any "
"behavior that depends on asan, be advised that it is not "
"turned on.";
#else
if (!HasCudaGPU()) {
VLOG(1) << "No GPU present. I won't use pinned allocator then.";
return;
}
VLOG(1) << "Caffe2 gpu: setting CPUAllocator to PinnedCPUAllocator.";
SetCPUAllocator(new PinnedCPUAllocator());
#endif
}
// Caffe2CudaInitializerHelper is a minimal struct whose sole purpose is to
// detect the first hint that this Caffe2 run is going to use GPU: either
// CUDAContext is initialized or CUDAContext::New is called. It then runs
// all the related cuda initialization functions.
namespace {
struct Caffe2CudaInitializerHelper {
Caffe2CudaInitializerHelper() {
// We cannot use bool because nvcc changes bool to __nv_bool which does
// not have a std::atomic instantiation.
static std::atomic<char> first_call(1);
if (first_call.fetch_and((char)0)) {
Caffe2InitializeCuda();
Caffe2SetCUDAMemoryPool();
Caffe2UsePinnedCPUAllocator();
}
}
};
} // namespace
CUDAContext::CUDAContext(const int gpu_id)
: gpu_id_(gpu_id == -1 ? GetDefaultGPUID() : gpu_id)
, random_seed_(math::randomNumberSeed()) {
static Caffe2CudaInitializerHelper g_cuda_initializer_;
}
CUDAContext::CUDAContext(const DeviceOption& option)
: gpu_id_(option.has_cuda_gpu_id() ?
option.cuda_gpu_id() : GetDefaultGPUID()),
random_seed_(option.has_random_seed() ?
option.random_seed() : math::randomNumberSeed()) {
static Caffe2CudaInitializerHelper g_cuda_initializer_;
DCHECK_EQ(option.device_type(), CUDA);
}
// shared mutex to lock out alloc / free during NCCL launches
std::mutex& CUDAContext::mutex() {
static std::mutex m;
return m;
}
void* CUDAContext::New(size_t nbytes) {
// Lock the mutex
std::lock_guard<std::mutex> lock(CUDAContext::mutex());
// A one-time caffe2 cuda initializer.
static Caffe2CudaInitializerHelper g_cuda_initializer_;
void* ptr = nullptr;
switch (g_cuda_memory_pool_type) {
case CudaMemoryPoolType::NONE:
CUDA_ENFORCE(cudaMalloc(&ptr, nbytes));
return ptr;
case CudaMemoryPoolType::CNMEM: {
#ifdef CAFFE2_USE_CNMEM
auto gpuId = GetCurrentGPUID();
CAFFE_ENFORCE(
gpuId < g_cnmem_available_for_device.size() &&
g_cnmem_available_for_device[gpuId],
"Trying to allocate on device ",
gpuId,
" but cnmem pool is not set up for it.");
CNMEM_CHECK(cnmemMalloc(&ptr, nbytes, nullptr));
g_cuda_device_affiliation[ptr] = GetCurrentGPUID();
VLOG(2) << "CNMEM allocating pointer " << ptr << " on device "
<< GetCurrentGPUID();
return ptr;
#else
CAFFE_THROW("This caffe2 is not built with cnmem support, so you should "
"not use the cnmem memory pool type.");
#endif // CAFFE2_USE_CNMEM
}
case CudaMemoryPoolType::CUB:
CUDA_ENFORCE(g_cub_allocator->DeviceAllocate(&ptr, nbytes));
g_cuda_device_affiliation[ptr] = GetCurrentGPUID();
VLOG(2) << "CUB allocating pointer " << ptr << " on device "
<< GetCurrentGPUID();
return ptr;
}
return nullptr;
}
void CUDAContext::Delete(void* ptr) {
// lock the mutex
std::lock_guard<std::mutex> lock(CUDAContext::mutex());
switch (g_cuda_memory_pool_type) {
case CudaMemoryPoolType::NONE: {
// If memory pool is not set up, use simple cudaFree.
cudaError_t error = cudaFree(ptr);
// For some reason, in Python runtime we sometimes delete a data pointer
// after the cuda runtime exits - this is odd but is probably caused by
// a static workspace that pycaffe2 uses, and the destruction got
// entangled in some race condition. Anyway, since cuda runtime is exiting
// anyway, we will not need to worry about memory leak, so we basically
// ignore it. This is definitely not ideal but works for now.
if (error != cudaSuccess && error != cudaErrorCudartUnloading) {
LOG(FATAL) << "Error at: " << __FILE__ << ":" << __LINE__ << ": "
<< cudaGetErrorString(error);
}
break; }
case CudaMemoryPoolType::CNMEM: {
#ifdef CAFFE2_USE_CNMEM
auto it = g_cuda_device_affiliation.find(ptr);
DCHECK(it != g_cuda_device_affiliation.end());
DeviceGuard guard(it->second);
VLOG(2) << "CNMEM freeing pointer " << ptr << " on device " << it->second;
CNMEM_CHECK(cnmemFree(ptr, nullptr));
g_cuda_device_affiliation.erase(it);
break;
#else
CAFFE_THROW("This caffe2 is not built with cnmem support, so you should "
"not use the cnmem memory pool type.");
#endif // CAFFE2_USE_CNMEM
}
case CudaMemoryPoolType::CUB: {
auto it = g_cuda_device_affiliation.find(ptr);
DCHECK(it != g_cuda_device_affiliation.end());
VLOG(2) << "CUB freeing pointer " << ptr << " on device " << it->second;
CUDA_ENFORCE(g_cub_allocator->DeviceFree(it->second, ptr));
g_cuda_device_affiliation.erase(it);
break;
}
}
}
} // namespace caffe2
|
997a73d3795f151fdb88bf8000c7df07d442481b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Udacity Homework 3
HDR Tone-mapping
Background HDR
==============
A High Definition Range (HDR) image contains a wider variation of intensity
and color than is allowed by the RGB format with 1 byte per channel that we
have used in the previous assignment.
To store this extra information we use single precision floating point for
each channel. This allows for an extremely wide range of intensity values.
In the image for this assignment, the inside of church with light coming in
through stained glass windows, the raw input floating point values for the
channels range from 0 to 275. But the mean is .41 and 98% of the values are
less than 3! This means that certain areas (the windows) are extremely bright
compared to everywhere else. If we linearly map this [0-275] range into the
[0-255] range that we have been using then most values will be mapped to zero!
The only thing we will be able to see are the very brightest areas - the
windows - everything else will appear pitch black.
The problem is that although we have cameras capable of recording the wide
range of intensity that exists in the real world our monitors are not capable
of displaying them. Our eyes are also quite capable of observing a much wider
range of intensities than our image formats / monitors are capable of
displaying.
Tone-mapping is a process that transforms the intensities in the image so that
the brightest values aren't nearly so far away from the mean. That way when
we transform the values into [0-255] we can actually see the entire image.
There are many ways to perform this process and it is as much an art as a
science - there is no single "right" answer. In this homework we will
implement one possible technique.
Background Chrominance-Luminance
================================
The RGB space that we have been using to represent images can be thought of as
one possible set of axes spanning a three dimensional space of color. We
sometimes choose other axes to represent this space because they make certain
operations more convenient.
Another possible way of representing a color image is to separate the color
information (chromaticity) from the brightness information. There are
multiple different methods for doing this - a common one during the analog
television days was known as Chrominance-Luminance or YUV.
We choose to represent the image in this way so that we can remap only the
intensity channel and then recombine the new intensity values with the color
information to form the final image.
Old TV signals used to be transmitted in this way so that black & white
televisions could display the luminance channel while color televisions would
display all three of the channels.
Tone-mapping
============
In this assignment we are going to transform the luminance channel (actually
the log of the luminance, but this is unimportant for the parts of the
algorithm that you will be implementing) by compressing its range to [0, 1].
To do this we need the cumulative distribution of the luminance values.
Example
-------
input : [2 4 3 3 1 7 4 5 7 0 9 4 3 2]
min / max / range: 0 / 9 / 9
histo with 3 bins: [4 7 3]
cdf : [4 11 14]
Your task is to calculate this cumulative distribution by following these
steps.
*/
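/* For illustration only, a host-side sketch of steps 1-4 above using the worked
   example from this comment ([2 4 3 3 1 7 4 5 7 0 9 4 3 2] with 3 bins). It is
   compiled out and the helper name is a placeholder. Note that the cdf shown
   above ([4 11 14]) is the inclusive scan; the kernels in this file produce the
   exclusive form ([0 4 11]). */
#if 0
#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <vector>
static void reference_histogram_and_cdf() {
  const std::vector<float> lum = {2, 4, 3, 3, 1, 7, 4, 5, 7, 0, 9, 4, 3, 2};
  const std::size_t numBins = 3;
  // 1) min / max and 2) range
  const float lumMin = *std::min_element(lum.begin(), lum.end()); // 0
  const float lumMax = *std::max_element(lum.begin(), lum.end()); // 9
  const float lumRange = lumMax - lumMin; // 9
  // 3) histogram: bin = (lum[i] - lumMin) / lumRange * numBins -> [4 7 3]
  std::vector<unsigned int> histo(numBins, 0);
  for (float v : lum) {
    const std::size_t bin = std::min(
        numBins - 1, static_cast<std::size_t>((v - lumMin) / lumRange * numBins));
    ++histo[bin];
  }
  // 4) exclusive scan of the histogram -> [0 4 11] (inclusive would be [4 11 14])
  std::vector<unsigned int> cdf(numBins, 0);
  for (std::size_t i = 1; i < numBins; ++i) cdf[i] = cdf[i - 1] + histo[i - 1];
  for (unsigned int c : cdf) std::printf("%u ", c); // prints: 0 4 11
  std::printf("\n");
}
#endif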
#include "reference_calc.cpp"
#include "utils.h"
#include <float.h>
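// atomicMaxf / atomicMinf below implement a float atomic max/min by spinning on
// atomicCAS over the value's bit pattern: each iteration proposes fmaxf/fminf of
// the current value and retries if another thread changed the slot first.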
__device__ static float atomicMaxf(float* address, float val)
{
int* address_as_i = (int*) address;
int old = *address_as_i, assumed;
do {
assumed = old;
old = ::atomicCAS(address_as_i, assumed,
__float_as_int(::fmaxf(val, __int_as_float(assumed))));
} while (assumed != old);
return __int_as_float(old);
}
__device__ static float atomicMinf(float* address, float val)
{
int* address_as_i = (int*) address;
int old = *address_as_i, assumed;
do {
assumed = old;
old = ::atomicCAS(address_as_i, assumed,
__float_as_int(::fminf(val, __int_as_float(assumed))));
} while (assumed != old);
return __int_as_float(old);
}
__global__ void max_reduce(const float* const d_array, float* d_max,
const size_t elements)
{
extern __shared__ float shared[];
int tid = threadIdx.x;
int gid = (blockDim.x * blockIdx.x) + tid;
shared[tid] = -FLT_MAX; // identity for max: FLT_MIN is the smallest positive float, not the lowest value
// load shared memory from global memory
if (gid < elements)
shared[tid] = d_array[gid];
__syncthreads();
// do max reduction in shared memory
for (unsigned int s=blockDim.x/2; s>0; s>>=1)
{
if (tid < s && gid < elements)
shared[tid] = max(shared[tid], shared[tid + s]);
__syncthreads();
}
// only thread 0 writes result for this block back to global memory
if (tid == 0)
atomicMaxf(d_max, shared[0]);
}
__global__ void min_reduce(const float* const d_array, float* d_min,
const size_t elements)
{
extern __shared__ float shared[];
int tid = threadIdx.x;
int gid = (blockDim.x * blockIdx.x) + tid;
shared[tid] = FLT_MAX;
// load shared memory from global memory
if (gid < elements)
shared[tid] = d_array[gid];
__syncthreads();
// do min reduction in shared memory
for (unsigned int s=blockDim.x/2; s>0; s>>=1)
{
if (tid < s && gid < elements)
shared[tid] = min(shared[tid], shared[tid + s]);
__syncthreads();
}
// only thread 0 writes result for this block back to global memory
if (tid == 0)
atomicMinf(d_min, shared[0]);
}
__global__
void histogram(const float* const d_logLuminance,
unsigned int* histo,
float logLumMin,
float logLumRange,
const size_t numBins)
{
int idx = threadIdx.x + blockDim.x * blockIdx.x;
// formula: bin = (lum[i] - lumMin) / lumRange * numBins
unsigned int bin = min(static_cast<unsigned int>(numBins - 1), static_cast<unsigned int>((d_logLuminance[idx] - logLumMin) / logLumRange * numBins));
atomicAdd(&histo[bin], 1);
}
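// Work-efficient Blelloch exclusive scan over a single block: each thread owns
// two elements, so the launch below uses one block of n/2 threads with
// n * sizeof(unsigned int) bytes of shared memory, and n (numBins) must be a
// power of two.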
__global__
void blelloch_scan(unsigned int *g_idata, unsigned int *g_odata, int n)
{
extern __shared__ unsigned int temp[]; // allocated on invocation
int thid = threadIdx.x;
int offset = 1;
temp[2*thid] = g_idata[2*thid]; // load input into shared memory
temp[2*thid+1] = g_idata[2*thid+1];
for (int d = n>>1; d > 0; d >>= 1) // build sum in place up the tree
{
__syncthreads();
if (thid < d) {
int ai = offset*(2*thid+1)-1;
int bi = offset*(2*thid+2)-1;
temp[bi] += temp[ai];
}
offset *= 2;
}
if (thid == 0) { temp[n - 1] = 0; } // clear the last element
for (int d = 1; d < n; d *= 2) // traverse down tree & build scan
{
offset >>= 1;
__syncthreads();
if (thid < d) {
int ai = offset*(2*thid+1)-1;
int bi = offset*(2*thid+2)-1;
unsigned int t = temp[ai];
temp[ai] = temp[bi];
temp[bi] += t;
}
}
__syncthreads();
g_odata[2*thid] = temp[2*thid]; // write results to device memory
g_odata[2*thid+1] = temp[2*thid+1];
}
void your_histogram_and_prefixsum(const float* const d_logLuminance,
unsigned int* const d_cdf,
float &min_logLum,
float &max_logLum,
const size_t numRows,
const size_t numCols,
const size_t numBins)
{
//TODO
/*Here are the steps you need to implement
1) find the minimum and maximum value in the input logLuminance channel
store in min_logLum and max_logLum
2) subtract them to find the range
3) generate a histogram of all the values in the logLuminance channel using
the formula: bin = (lum[i] - lumMin) / lumRange * numBins
4) Perform an exclusive scan (prefix sum) on the histogram to get
the cumulative distribution of luminance values (this should go in the
incoming d_cdf pointer which already has been allocated for you) */
unsigned int imageSize = numRows * numCols;
std::cout << "[Debug]imageSize: " << imageSize << std::endl;
/* 1. find min and max value in d_logLuminance and store them in min_logLum and max_logLum */
// calculate grid and block size for reduce kernel
unsigned int reduce_gridSize = (imageSize%1024 == 0) ? imageSize/1024 : imageSize/1024+1;
unsigned int reduce_blockSize = 1024;
std::cout << "[Debug]reduce_gridSize: " << reduce_gridSize << std::endl;
std::cout << "[Debug]reduce_blockSize: " << reduce_blockSize << std::endl;
// declare points to max on min value
float * d_max_logLum, * d_min_logLum;
// allocate memory on device for d_max_logLum and d_min_logLum
checkCudaErrors(hipMalloc(&d_max_logLum, sizeof(float)));
checkCudaErrors(hipMalloc(&d_min_logLum, sizeof(float)));
// call max and min reduce kernel to get max_logLum and min_logLum
hipLaunchKernelGGL(( max_reduce), dim3(reduce_gridSize), dim3(reduce_blockSize), sizeof(float)*1024, 0, d_logLuminance, d_max_logLum, imageSize);
// call hipDeviceSynchronize(), then call checkCudaErrors() immediately after launching kernel
// to make sure that no mistakes were made.
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( min_reduce), dim3(reduce_gridSize), dim3(reduce_blockSize), sizeof(float)*1024, 0, d_logLuminance, d_min_logLum, imageSize);
// call hipDeviceSynchronize(), then call checkCudaErrors() immediately after launching kernel
// to make sure that no mistakes were made.
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
// copy max and min back to host memory
checkCudaErrors(hipMemcpy(&max_logLum, d_max_logLum, sizeof(float), hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(&min_logLum, d_min_logLum, sizeof(float), hipMemcpyDeviceToHost));
// debug code to see if i got the correct max and min loglum values
std::cout << "[Debug]max_logLum: " << max_logLum << std::endl;
std::cout << "[Debug]min_logLum: " << min_logLum << std::endl;
/*2. subtract the minimum value from the maximum value in the input logLuminance channel to get the range */
float logLumRange = max_logLum - min_logLum;
// debug code to see if i got the correct max and min loglum values
std::cout << "[Debug]logLumRange: " << logLumRange << std::endl;
/* 3. generate a histogram of all the values in the logLuminance channel using the formula: bin = (lum[i] - lumMin) / lumRange * numBins */
// declare a point to histogram memory
unsigned int *histo;
// allocate memory on device
checkCudaErrors(hipMalloc(&histo, sizeof(unsigned int)*numBins));
// check out cudamemset to initialize histo
hipMemset(histo, 0, sizeof(unsigned int)*numBins);
// calculate grid and block size for histogram kernel
unsigned int histo_gridSize = (imageSize%1024 == 0) ? imageSize/1024 : imageSize/1024+1;
unsigned int histo_blockSize = 1024;
std::cout << "[Debug]histo_gridSize: " << histo_gridSize << std::endl;
std::cout << "[Debug]histo_blockSize: " << histo_blockSize << std::endl;
std::cout << "[Debug]numBins: " << numBins << std::endl;
// launch the histogram kernel to get the histogram of luminance values
hipLaunchKernelGGL(( histogram), dim3(histo_gridSize), dim3(histo_blockSize), 0, 0, d_logLuminance, histo, min_logLum, logLumRange, numBins);
// call hipDeviceSynchronize(), then call checkCudaErrors() immediately after launching kernel
// to make sure that no mistakes were made.
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
/* 4. Perform an exclusive scan (prefix sum) on the histogram to get the cumulative distribution of luminance values */
//calculate grid and block size for exclusive scan kernel
unsigned int scan_gridSize = 1;
unsigned int scan_blockSize = numBins/2;
std::cout << "[Debug]scan_blockSize: " << scan_blockSize << std::endl;
// launch the Blelloch scan kernel
hipLaunchKernelGGL(( blelloch_scan), dim3(scan_gridSize), dim3(scan_blockSize), sizeof(unsigned int)*numBins, 0, histo, d_cdf, numBins);
// call hipDeviceSynchronize(), then call checkCudaErrors() immediately after launching kernel
// to make sure that no mistakes were made.
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
/* 5. Free allocated device memory */
hipFree(d_max_logLum);
hipFree(d_min_logLum);
hipFree(histo);
}
| 997a73d3795f151fdb88bf8000c7df07d442481b.cu | /* Udacity Homework 3
HDR Tone-mapping
Background HDR
==============
A High Definition Range (HDR) image contains a wider variation of intensity
and color than is allowed by the RGB format with 1 byte per channel that we
have used in the previous assignment.
To store this extra information we use single precision floating point for
each channel. This allows for an extremely wide range of intensity values.
In the image for this assignment, the inside of church with light coming in
through stained glass windows, the raw input floating point values for the
channels range from 0 to 275. But the mean is .41 and 98% of the values are
less than 3! This means that certain areas (the windows) are extremely bright
compared to everywhere else. If we linearly map this [0-275] range into the
[0-255] range that we have been using then most values will be mapped to zero!
The only thing we will be able to see are the very brightest areas - the
windows - everything else will appear pitch black.
The problem is that although we have cameras capable of recording the wide
range of intensity that exists in the real world our monitors are not capable
of displaying them. Our eyes are also quite capable of observing a much wider
range of intensities than our image formats / monitors are capable of
displaying.
Tone-mapping is a process that transforms the intensities in the image so that
the brightest values aren't nearly so far away from the mean. That way when
we transform the values into [0-255] we can actually see the entire image.
There are many ways to perform this process and it is as much an art as a
science - there is no single "right" answer. In this homework we will
implement one possible technique.
Background Chrominance-Luminance
================================
The RGB space that we have been using to represent images can be thought of as
one possible set of axes spanning a three dimensional space of color. We
sometimes choose other axes to represent this space because they make certain
operations more convenient.
Another possible way of representing a color image is to separate the color
information (chromaticity) from the brightness information. There are
multiple different methods for doing this - a common one during the analog
television days was known as Chrominance-Luminance or YUV.
We choose to represent the image in this way so that we can remap only the
intensity channel and then recombine the new intensity values with the color
information to form the final image.
Old TV signals used to be transmitted in this way so that black & white
televisions could display the luminance channel while color televisions would
display all three of the channels.
Tone-mapping
============
In this assignment we are going to transform the luminance channel (actually
the log of the luminance, but this is unimportant for the parts of the
algorithm that you will be implementing) by compressing its range to [0, 1].
To do this we need the cumulative distribution of the luminance values.
Example
-------
input : [2 4 3 3 1 7 4 5 7 0 9 4 3 2]
min / max / range: 0 / 9 / 9
histo with 3 bins: [4 7 3]
cdf : [4 11 14]
Your task is to calculate this cumulative distribution by following these
steps.
*/
#include "reference_calc.cpp"
#include "utils.h"
#include <float.h>
__device__ static float atomicMaxf(float* address, float val)
{
int* address_as_i = (int*) address;
int old = *address_as_i, assumed;
do {
assumed = old;
old = ::atomicCAS(address_as_i, assumed,
__float_as_int(::fmaxf(val, __int_as_float(assumed))));
} while (assumed != old);
return __int_as_float(old);
}
__device__ static float atomicMinf(float* address, float val)
{
int* address_as_i = (int*) address;
int old = *address_as_i, assumed;
do {
assumed = old;
old = ::atomicCAS(address_as_i, assumed,
__float_as_int(::fminf(val, __int_as_float(assumed))));
} while (assumed != old);
return __int_as_float(old);
}
__global__ void max_reduce(const float* const d_array, float* d_max,
const size_t elements)
{
extern __shared__ float shared[];
int tid = threadIdx.x;
int gid = (blockDim.x * blockIdx.x) + tid;
shared[tid] = -FLT_MAX; // identity for max: FLT_MIN is the smallest positive float, not the lowest value
// load shared memory from global memory
if (gid < elements)
shared[tid] = d_array[gid];
__syncthreads();
// do max reduction in shared memory
for (unsigned int s=blockDim.x/2; s>0; s>>=1)
{
if (tid < s && gid < elements)
shared[tid] = max(shared[tid], shared[tid + s]);
__syncthreads();
}
// only thread 0 writes result for this block back to global memory
if (tid == 0)
atomicMaxf(d_max, shared[0]);
}
__global__ void min_reduce(const float* const d_array, float* d_min,
const size_t elements)
{
extern __shared__ float shared[];
int tid = threadIdx.x;
int gid = (blockDim.x * blockIdx.x) + tid;
shared[tid] = FLT_MAX;
// load shared memory from global memory
if (gid < elements)
shared[tid] = d_array[gid];
__syncthreads();
// do min reduction in shared memory
for (unsigned int s=blockDim.x/2; s>0; s>>=1)
{
if (tid < s && gid < elements)
shared[tid] = min(shared[tid], shared[tid + s]);
__syncthreads();
}
// only thread 0 writes result for this block back to global memory
if (tid == 0)
atomicMinf(d_min, shared[0]);
}
__global__
void histogram(const float* const d_logLuminance,
unsigned int* histo,
float logLumMin,
float logLumRange,
const size_t numBins)
{
int idx = threadIdx.x + blockDim.x * blockIdx.x;
// formula: bin = (lum[i] - lumMin) / lumRange * numBins
unsigned int bin = min(static_cast<unsigned int>(numBins - 1), static_cast<unsigned int>((d_logLuminance[idx] - logLumMin) / logLumRange * numBins));
atomicAdd(&histo[bin], 1);
}
__global__
void blelloch_scan(unsigned int *g_idata, unsigned int *g_odata, int n)
{
extern __shared__ unsigned int temp[]; // allocated on invocation
int thid = threadIdx.x;
int offset = 1;
temp[2*thid] = g_idata[2*thid]; // load input into shared memory
temp[2*thid+1] = g_idata[2*thid+1];
for (int d = n>>1; d > 0; d >>= 1) // build sum in place up the tree
{
__syncthreads();
if (thid < d) {
int ai = offset*(2*thid+1)-1;
int bi = offset*(2*thid+2)-1;
temp[bi] += temp[ai];
}
offset *= 2;
}
if (thid == 0) { temp[n - 1] = 0; } // clear the last element
for (int d = 1; d < n; d *= 2) // traverse down tree & build scan
{
offset >>= 1;
__syncthreads();
if (thid < d) {
int ai = offset*(2*thid+1)-1;
int bi = offset*(2*thid+2)-1;
unsigned int t = temp[ai];
temp[ai] = temp[bi];
temp[bi] += t;
}
}
__syncthreads();
g_odata[2*thid] = temp[2*thid]; // write results to device memory
g_odata[2*thid+1] = temp[2*thid+1];
}
void your_histogram_and_prefixsum(const float* const d_logLuminance,
unsigned int* const d_cdf,
float &min_logLum,
float &max_logLum,
const size_t numRows,
const size_t numCols,
const size_t numBins)
{
//TODO
/*Here are the steps you need to implement
1) find the minimum and maximum value in the input logLuminance channel
store in min_logLum and max_logLum
2) subtract them to find the range
3) generate a histogram of all the values in the logLuminance channel using
the formula: bin = (lum[i] - lumMin) / lumRange * numBins
4) Perform an exclusive scan (prefix sum) on the histogram to get
the cumulative distribution of luminance values (this should go in the
incoming d_cdf pointer which already has been allocated for you) */
unsigned int imageSize = numRows * numCols;
std::cout << "[Debug]imageSize: " << imageSize << std::endl;
/* 1. find min and max value in d_logLuminance and store them in min_logLum and max_logLum */
// calculate grid and block size for reduce kernel
unsigned int reduce_gridSize = (imageSize%1024 == 0) ? imageSize/1024 : imageSize/1024+1;
unsigned int reduce_blockSize = 1024;
std::cout << "[Debug]reduce_gridSize: " << reduce_gridSize << std::endl;
std::cout << "[Debug]reduce_blockSize: " << reduce_blockSize << std::endl;
// declare pointers to the max and min values
float * d_max_logLum, * d_min_logLum;
// allocate memory on device for d_max_logLum and d_min_logLum
checkCudaErrors(cudaMalloc(&d_max_logLum, sizeof(float)));
checkCudaErrors(cudaMalloc(&d_min_logLum, sizeof(float)));
// call max and min reduce kernel to get max_logLum and min_logLum
max_reduce<<<reduce_gridSize, reduce_blockSize, sizeof(float)*1024>>>(d_logLuminance, d_max_logLum, imageSize);
// call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after launching kernel
// to make sure that no mistakes were made.
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
min_reduce<<<reduce_gridSize, reduce_blockSize, sizeof(float)*1024>>>(d_logLuminance, d_min_logLum, imageSize);
// call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after launching kernel
// to make sure that no mistakes were made.
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
// copy max and min back to host memory
checkCudaErrors(cudaMemcpy(&max_logLum, d_max_logLum, sizeof(float), cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(&min_logLum, d_min_logLum, sizeof(float), cudaMemcpyDeviceToHost));
// debug code to see if i got the correct max and min loglum values
std::cout << "[Debug]max_logLum: " << max_logLum << std::endl;
std::cout << "[Debug]min_logLum: " << min_logLum << std::endl;
/*2. subtract the minimum value from the maximum value in the input logLuminance channel to get the range */
float logLumRange = max_logLum - min_logLum;
// debug code to see if i got the correct max and min loglum values
std::cout << "[Debug]logLumRange: " << logLumRange << std::endl;
/* 3. generate a histogram of all the values in the logLuminance channel using the formula: bin = (lum[i] - lumMin) / lumRange * numBins */
// declare a pointer to the histogram memory
unsigned int *histo;
// allocate memory on device
checkCudaErrors(cudaMalloc(&histo, sizeof(unsigned int)*numBins));
// check out cudamemset to initialize histo
cudaMemset(histo, 0, sizeof(unsigned int)*numBins);
// calculate grid and block size for histogram kernel
unsigned int histo_gridSize = (imageSize%1024 == 0) ? imageSize/1024 : imageSize/1024+1;
unsigned int histo_blockSize = 1024;
std::cout << "[Debug]histo_gridSize: " << histo_gridSize << std::endl;
std::cout << "[Debug]histo_blockSize: " << histo_blockSize << std::endl;
std::cout << "[Debug]numBins: " << numBins << std::endl;
// launch the histogram kernel to get the histogram of luminance values
histogram<<<histo_gridSize, histo_blockSize>>>(d_logLuminance, histo, min_logLum, logLumRange, numBins, imageSize);
// call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after launching kernel
// to make sure that no mistakes were made.
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
/* 4. Perform an exclusive scan (prefix sum) on the histogram to get the cumulative distribution of luminance values */
//calculate grid and block size for exclusive scan kernel
unsigned int scan_gridSize = 1;
unsigned int scan_blockSize = numBins/2;
std::cout << "[Debug]scan_blockSize: " << scan_blockSize << std::endl;
// launch the Blelloch scan kernel
blelloch_scan<<<scan_gridSize, scan_blockSize, sizeof(unsigned int)*numBins>>>(histo, d_cdf, numBins);
// call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after launching kernel
// to make sure that no mistakes were made.
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
/* 5. Free allocated device memory */
cudaFree(d_max_logLum);
cudaFree(d_min_logLum);
cudaFree(histo);
}
|
calcNormalFromSmoothedColorWHA.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* calcNormalFromSmoothedColorWHA.cu
*
* Created on: 02-09-2013
* Author: Kamil Szewc ([email protected])
*/
#include <math.h>
#include "../../sph.h"
#include "../../hlp.h"
#include "../../methods/kernels.cuh"
#include "../../methods/interactions.cuh"
__device__ static real2 interaction(uint i, uint j, real2 dpos, real2 dvel, Particle *p, Parameters *par)
{
real q = hypot(dpos.x, dpos.y) * par->I_H;
if (q < 2.0) {
real gkx = grad_of_kern(dpos.x, q, par->I_H);
real gky = grad_of_kern(dpos.y, q, par->I_H);
real put = p[i].cs - p[j].cs;
return MAKE_REAL2(p[j].m*put*gkx / p[j].d, p[j].m*put*gky / p[j].d);
}
else {
return MAKE_REAL2(0.0, 0.0);
}
}
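// Note on what this pairwise term computes (interpretation, not from the original
// comments): it appears to accumulate an SPH-style gradient of the smoothed colour
// field cs over neighbouring particles; the kernel below stores the resulting vector in
// p[i].n.x/y and its magnitude in p[i].n.z, the usual way an interface normal is
// estimated from a smoothed colour function.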
__global__ void calcNormalFromSmoothedColorWHA(Particle *p,
uint *gridParticleIndex,
uint *cellStart,
uint *cellEnd,
Parameters *par)
{
uint index = threadIdx.x + blockIdx.x*blockDim.x;
if (index < par->N) {
register real2 result = MAKE_REAL2(0.0,0.0);
#include "../../methods/interactions/interactionsNegativeOnWallNoSlip.cuh"
p[index].n.x = result.x;
p[index].n.y = result.y;
p[index].n.z = sqrt(pow2(result.x) + pow2(result.y));
}
}
| calcNormalFromSmoothedColorWHA.cu | /*
* calcNormalFromSmoothedColorWHA.cu
*
* Created on: 02-09-2013
* Author: Kamil Szewc ([email protected])
*/
#include <math.h>
#include "../../sph.h"
#include "../../hlp.h"
#include "../../methods/kernels.cuh"
#include "../../methods/interactions.cuh"
__device__ static real2 interaction(uint i, uint j, real2 dpos, real2 dvel, Particle *p, Parameters *par)
{
real q = hypot(dpos.x, dpos.y) * par->I_H;
if (q < 2.0) {
real gkx = grad_of_kern(dpos.x, q, par->I_H);
real gky = grad_of_kern(dpos.y, q, par->I_H);
real put = p[i].cs - p[j].cs;
return MAKE_REAL2(p[j].m*put*gkx / p[j].d, p[j].m*put*gky / p[j].d);
}
else {
return MAKE_REAL2(0.0, 0.0);
}
}
__global__ void calcNormalFromSmoothedColorWHA(Particle *p,
uint *gridParticleIndex,
uint *cellStart,
uint *cellEnd,
Parameters *par)
{
uint index = threadIdx.x + blockIdx.x*blockDim.x;
if (index < par->N) {
register real2 result = MAKE_REAL2(0.0,0.0);
#include "../../methods/interactions/interactionsNegativeOnWallNoSlip.cuh"
p[index].n.x = result.x;
p[index].n.y = result.y;
p[index].n.z = sqrt(pow2(result.x) + pow2(result.y));
}
}
|
62928bd17b09aa33300c0eefc781c1f060966db9.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "bcnn_forward_upsample_cuda_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
size_t dst_sz = 1;
float *src = NULL;
hipMalloc(&src, XSIZE*YSIZE);
int w = XSIZE;
int h = YSIZE;
int c = 2;
int n = XSIZE*YSIZE;
int size = XSIZE*YSIZE;
float *dst = NULL;
hipMalloc(&dst, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((bcnn_forward_upsample_cuda_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, dst_sz, src, w, h, c, n, size, dst);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((bcnn_forward_upsample_cuda_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, dst_sz, src, w, h, c, n, size, dst);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((bcnn_forward_upsample_cuda_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, dst_sz, src, w, h, c, n, size, dst);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 62928bd17b09aa33300c0eefc781c1f060966db9.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "bcnn_forward_upsample_cuda_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
size_t dst_sz = 1;
float *src = NULL;
cudaMalloc(&src, XSIZE*YSIZE);
int w = XSIZE;
int h = YSIZE;
int c = 2;
int n = XSIZE*YSIZE;
int size = XSIZE*YSIZE;
float *dst = NULL;
cudaMalloc(&dst, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
bcnn_forward_upsample_cuda_kernel<<<gridBlock,threadBlock>>>(dst_sz,src,w,h,c,n,size,dst);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
bcnn_forward_upsample_cuda_kernel<<<gridBlock,threadBlock>>>(dst_sz,src,w,h,c,n,size,dst);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
bcnn_forward_upsample_cuda_kernel<<<gridBlock,threadBlock>>>(dst_sz,src,w,h,c,n,size,dst);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
5ed0a11f762d648df604f44cf5e85d3fe7693264.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 2
// Image Blurring
//
// In this homework we are blurring an image. To do this, imagine that we have
// a square array of weight values. For each pixel in the image, imagine that we
// overlay this square array of weights on top of the image such that the center
// of the weight array is aligned with the current pixel. To compute a blurred
// pixel value, we multiply each pair of numbers that line up. In other words, we
// multiply each weight with the pixel underneath it. Finally, we add up all of the
// multiplied numbers and assign that value to our output for the current pixel.
// We repeat this process for all the pixels in the image.
// To help get you started, we have included some useful notes here.
//****************************************************************************
// For a color image that has multiple channels, we suggest separating
// the different color channels so that each color is stored contiguously
// instead of being interleaved. This will simplify your code.
// That is instead of RGBARGBARGBARGBA... we suggest transforming to three
// arrays (as in the previous homework we ignore the alpha channel again):
// 1) RRRRRRRR...
// 2) GGGGGGGG...
// 3) BBBBBBBB...
//
// The original layout is known as an Array of Structures (AoS) whereas the
// format we are converting to is known as a Structure of Arrays (SoA).
// As a warm-up, we will ask you to write the kernel that performs this
// separation. You should then write the "meat" of the assignment,
// which is the kernel that performs the actual blur. We provide code that
// re-combines your blurred results for each color channel.
//****************************************************************************
// You must fill in the gaussian_blur kernel to perform the blurring of the
// inputChannel, using the array of weights, and put the result in the outputChannel.
// Here is an example of computing a blur, using a weighted average, for a single
// pixel in a small image.
//
// Array of weights:
//
// 0.0 0.2 0.0
// 0.2 0.2 0.2
// 0.0 0.2 0.0
//
// Image (note that we align the array of weights to the center of the box):
//
// 1 2 5 2 0 3
// -------
// 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 +
// | |
// 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2
// | |
// 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3
// -------
// 9 6 5 0 3 9
//
// (1) (2) (3)
//
// A good starting place is to map each thread to a pixel as you have before.
// Then every thread can perform steps 2 and 3 in the diagram above
// completely independently of one another.
// Note that the array of weights is square, so its height is the same as its width.
// We refer to the array of weights as a filter, and we refer to its width with the
// variable filterWidth.
//****************************************************************************
// Your homework submission will be evaluated based on correctness and speed.
// We test each pixel against a reference solution. If any pixel differs by
// more than some small threshold value, the system will tell you that your
// solution is incorrect, and it will let you try again.
// Once you have gotten that working correctly, then you can think about using
// shared memory and having the threads cooperate to achieve better performance.
//****************************************************************************
// Also note that we've supplied a helpful debugging function called checkCudaErrors.
// You should wrap your allocation and copying statements like we've done in the
// code we're supplying you. Here is an example of the unsafe way to allocate
// memory on the GPU:
//
// hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
//
// Writing code the safe way requires slightly more typing, but is very helpful for
// catching mistakes. If you write code the unsafe way and you make a mistake, then
// any subsequent kernels won't compute anything, and it will be hard to figure out
// why. Writing code the safe way will inform you as soon as you make a mistake.
// Finally, remember to free the memory you allocate at the end of the function.
//****************************************************************************
#include "utils.h"
#include <stdio.h>
__global__
void gaussian_blur(const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols,
const float* const filter, const int filterWidth)
{
// TODO
// NOTE: Be sure to compute any intermediate results in floating point
// before storing the final result as unsigned char.
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
// NOTE: If a thread's absolute 2D position is within the image, but some of
// its neighbors are outside the image, then you will need to be extra careful. Instead
// of trying to read such a neighbor value from GPU memory (which won't work because
// the value is out of bounds), you should explicitly clamp the neighbor values you read
// to be within the bounds of the image. If this is not clear to you, then please refer
// to sequential reference solution for the exact clamping semantics you should follow.
int i, j;
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
if (x >= numCols || y >= numRows) {
return;
}
// fork another kernel to parallel???
// should we assume filterWidth is an odd number?
float blurred = 0;
for (i=0; i<filterWidth; i++) {
for (j=0; j<filterWidth; j++) {
// weight of the filter pixel
float pw = filter[filterWidth * j + i];
// coordinate of the filter pixel
int px = x - filterWidth/2 + i;
int py = y - filterWidth/2 + j;
// clamp to boundary of the image
if (px < 0) px = 0;
if (px >= numCols) px = numCols - 1;
if (py < 0) py = 0;
if (py >= numRows) py = numRows - 1;
// color of the filter pixel we are going to handle
float pc = inputChannel[numCols * py + px];
blurred += pw * pc;
}
}
outputChannel[numCols * y + x] = blurred;
}
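// The header comment above suggests moving on to a shared-memory version once the naive
// kernel works. The kernel below is only an illustrative sketch of that idea (it is not
// part of the original assignment code and is not launched by the host code in this
// file); it assumes a BLUR_TILE x BLUR_TILE thread block, an odd filterWidth, and a
// dynamic shared-memory allocation of (BLUR_TILE + filterWidth - 1)^2 bytes at launch.
#define BLUR_TILE 16
__global__
void gaussian_blur_shared(const unsigned char* const inputChannel,
                          unsigned char* const outputChannel,
                          int numRows, int numCols,
                          const float* const filter, const int filterWidth)
{
  extern __shared__ unsigned char tile[];
  const int r     = filterWidth / 2;
  const int tileW = BLUR_TILE + 2 * r;
  const int x = blockIdx.x * BLUR_TILE + threadIdx.x;
  const int y = blockIdx.y * BLUR_TILE + threadIdx.y;
  // cooperatively load the tile plus its halo, clamping reads to the image border
  for (int ty = threadIdx.y; ty < tileW; ty += BLUR_TILE) {
    for (int tx = threadIdx.x; tx < tileW; tx += BLUR_TILE) {
      int gx = (int)(blockIdx.x * BLUR_TILE) + tx - r;
      int gy = (int)(blockIdx.y * BLUR_TILE) + ty - r;
      if (gx < 0) gx = 0;
      if (gx >= numCols) gx = numCols - 1;
      if (gy < 0) gy = 0;
      if (gy >= numRows) gy = numRows - 1;
      tile[ty * tileW + tx] = inputChannel[numCols * gy + gx];
    }
  }
  __syncthreads();
  if (x >= numCols || y >= numRows) {
    return;
  }
  // same weighted average as gaussian_blur, but every neighbour read hits shared memory
  float blurred = 0.0f;
  for (int j = 0; j < filterWidth; j++) {
    for (int i = 0; i < filterWidth; i++) {
      blurred += filter[filterWidth * j + i] *
                 tile[(threadIdx.y + j) * tileW + (threadIdx.x + i)];
    }
  }
  outputChannel[numCols * y + x] = blurred;
}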
//This kernel takes in an image represented as a uchar4 and splits
//it into three images consisting of only one color channel each
__global__
void separateChannels(const uchar4* const inputImageRGBA,
int numRows,
int numCols,
unsigned char* const redChannel,
unsigned char* const greenChannel,
unsigned char* const blueChannel)
{
// TODO
//
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
if (x >= numCols || y >= numRows) {
return;
}
redChannel [numCols * y + x] = inputImageRGBA[numCols * y + x].x;
greenChannel[numCols * y + x] = inputImageRGBA[numCols * y + x].y;
blueChannel [numCols * y + x] = inputImageRGBA[numCols * y + x].z;
}
//This kernel takes in three color channels and recombines them
//into one image. The alpha channel is set to 255 to represent
//that this image has no transparency.
__global__
void recombineChannels(const unsigned char* const redChannel,
const unsigned char* const greenChannel,
const unsigned char* const blueChannel,
uchar4* const outputImageRGBA,
int numRows,
int numCols)
{
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
unsigned char red = redChannel[thread_1D_pos];
unsigned char green = greenChannel[thread_1D_pos];
unsigned char blue = blueChannel[thread_1D_pos];
//Alpha should be 255 for no transparency
uchar4 outputPixel = make_uchar4(red, green, blue, 255);
outputImageRGBA[thread_1D_pos] = outputPixel;
}
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
const float* const h_filter, const size_t filterWidth)
{
//allocate memory for the three different channels
//original
checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(hipMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(hipMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage));
//TODO:
//Allocate memory for the filter on the GPU
//Use the pointer d_filter that we have already declared for you
//You need to allocate memory for the filter with hipMalloc
//be sure to use checkCudaErrors like the above examples to
//be able to tell if anything goes wrong
//IMPORTANT: Notice that we pass a pointer to a pointer to hipMalloc
checkCudaErrors(hipMalloc(&d_filter, sizeof(float) * filterWidth * filterWidth));
//TODO:
//Copy the filter on the host (h_filter) to the memory you just allocated
//on the GPU. hipMemcpy(dst, src, numBytes, hipMemcpyHostToDevice);
//Remember to use checkCudaErrors!
checkCudaErrors(hipMemcpy(d_filter, h_filter, sizeof(float) * filterWidth * filterWidth, hipMemcpyHostToDevice));
printf("Ender: this is the filter (%d x %d):\n\n", (int)filterWidth, (int)filterWidth);
unsigned int i, j;
for (i=0; i<filterWidth; i++) {
for (j=0; j<filterWidth; j++) {
printf("%c%f%c",j==0?'\t':' ', h_filter[filterWidth*i+j], j==filterWidth-1?'\n':' ');
}
}
printf("\n");
}
void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
unsigned char *d_redBlurred,
unsigned char *d_greenBlurred,
unsigned char *d_blueBlurred,
const int filterWidth)
{
//TODO: Set reasonable block size (i.e., number of threads per block)
const dim3 blockSize(32, 32);
//TODO:
//Compute correct grid size (i.e., number of blocks per kernel launch)
//from the image size and block size.
const dim3 gridSize(numCols/blockSize.x+1, numRows/blockSize.y+1);
printf("Ender: Image dimension - %dC x %dR\n", (int)numCols, (int)numRows);
printf("Ender: block %d x %d, grid %d x %d\n", blockSize.x, blockSize.y, gridSize.x, gridSize.y);
//TODO: Launch a kernel for separating the RGBA image into different color channels
hipLaunchKernelGGL(( separateChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue);
// Call hipDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
//TODO: Call your convolution kernel here 3 times, once for each color channel.
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth);
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth);
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth);
// Again, call hipDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
// Now we recombine your results. We take care of launching this kernel for you.
//
// NOTE: This kernel launch depends on the gridSize and blockSize variables,
// which you must set yourself.
hipLaunchKernelGGL(( recombineChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_redBlurred,
d_greenBlurred,
d_blueBlurred,
d_outputImageRGBA,
numRows,
numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
//Free all the memory that we allocated
//TODO: make sure you free any arrays that you allocated
void cleanup() {
checkCudaErrors(hipFree(d_red));
checkCudaErrors(hipFree(d_green));
checkCudaErrors(hipFree(d_blue));
checkCudaErrors(hipFree(d_filter));
}
| 5ed0a11f762d648df604f44cf5e85d3fe7693264.cu | // Homework 2
// Image Blurring
//
// In this homework we are blurring an image. To do this, imagine that we have
// a square array of weight values. For each pixel in the image, imagine that we
// overlay this square array of weights on top of the image such that the center
// of the weight array is aligned with the current pixel. To compute a blurred
// pixel value, we multiply each pair of numbers that line up. In other words, we
// multiply each weight with the pixel underneath it. Finally, we add up all of the
// multiplied numbers and assign that value to our output for the current pixel.
// We repeat this process for all the pixels in the image.
// To help get you started, we have included some useful notes here.
//****************************************************************************
// For a color image that has multiple channels, we suggest separating
// the different color channels so that each color is stored contiguously
// instead of being interleaved. This will simplify your code.
// That is instead of RGBARGBARGBARGBA... we suggest transforming to three
// arrays (as in the previous homework we ignore the alpha channel again):
// 1) RRRRRRRR...
// 2) GGGGGGGG...
// 3) BBBBBBBB...
//
// The original layout is known as an Array of Structures (AoS) whereas the
// format we are converting to is known as a Structure of Arrays (SoA).
// As a warm-up, we will ask you to write the kernel that performs this
// separation. You should then write the "meat" of the assignment,
// which is the kernel that performs the actual blur. We provide code that
// re-combines your blurred results for each color channel.
//****************************************************************************
// You must fill in the gaussian_blur kernel to perform the blurring of the
// inputChannel, using the array of weights, and put the result in the outputChannel.
// Here is an example of computing a blur, using a weighted average, for a single
// pixel in a small image.
//
// Array of weights:
//
// 0.0 0.2 0.0
// 0.2 0.2 0.2
// 0.0 0.2 0.0
//
// Image (note that we align the array of weights to the center of the box):
//
// 1 2 5 2 0 3
// -------
// 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 +
// | |
// 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2
// | |
// 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3
// -------
// 9 6 5 0 3 9
//
// (1) (2) (3)
//
// A good starting place is to map each thread to a pixel as you have before.
// Then every thread can perform steps 2 and 3 in the diagram above
// completely independently of one another.
// Note that the array of weights is square, so its height is the same as its width.
// We refer to the array of weights as a filter, and we refer to its width with the
// variable filterWidth.
//****************************************************************************
// Your homework submission will be evaluated based on correctness and speed.
// We test each pixel against a reference solution. If any pixel differs by
// more than some small threshold value, the system will tell you that your
// solution is incorrect, and it will let you try again.
// Once you have gotten that working correctly, then you can think about using
// shared memory and having the threads cooperate to achieve better performance.
//****************************************************************************
// Also note that we've supplied a helpful debugging function called checkCudaErrors.
// You should wrap your allocation and copying statements like we've done in the
// code we're supplying you. Here is an example of the unsafe way to allocate
// memory on the GPU:
//
// cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
//
// Writing code the safe way requires slightly more typing, but is very helpful for
// catching mistakes. If you write code the unsafe way and you make a mistake, then
// any subsequent kernels won't compute anything, and it will be hard to figure out
// why. Writing code the safe way will inform you as soon as you make a mistake.
// Finally, remember to free the memory you allocate at the end of the function.
//****************************************************************************
#include "utils.h"
#include <stdio.h>
__global__
void gaussian_blur(const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols,
const float* const filter, const int filterWidth)
{
// TODO
// NOTE: Be sure to compute any intermediate results in floating point
// before storing the final result as unsigned char.
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
// NOTE: If a thread's absolute 2D position is within the image, but some of
// its neighbors are outside the image, then you will need to be extra careful. Instead
// of trying to read such a neighbor value from GPU memory (which won't work because
// the value is out of bounds), you should explicitly clamp the neighbor values you read
// to be within the bounds of the image. If this is not clear to you, then please refer
// to sequential reference solution for the exact clamping semantics you should follow.
int i, j;
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
if (x >= numCols || y >= numRows) {
return;
}
// fork another kernel to parallel???
// should we assume filterWidth is an odd number?
float blurred = 0;
for (i=0; i<filterWidth; i++) {
for (j=0; j<filterWidth; j++) {
// weight of the filter pixel
float pw = filter[filterWidth * j + i];
// coordinate of the filter pixel
int px = x - filterWidth/2 + i;
int py = y - filterWidth/2 + j;
// clamp to boundary of the image
if (px < 0) px = 0;
if (px >= numCols) px = numCols - 1;
if (py < 0) py = 0;
if (py >= numRows) py = numRows - 1;
// color of the filter pixel we are going to handle
float pc = inputChannel[numCols * py + px];
blurred += pw * pc;
}
}
outputChannel[numCols * y + x] = blurred;
}
//This kernel takes in an image represented as a uchar4 and splits
//it into three images consisting of only one color channel each
__global__
void separateChannels(const uchar4* const inputImageRGBA,
int numRows,
int numCols,
unsigned char* const redChannel,
unsigned char* const greenChannel,
unsigned char* const blueChannel)
{
// TODO
//
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
if (x >= numCols || y >= numRows) {
return;
}
redChannel [numCols * y + x] = inputImageRGBA[numCols * y + x].x;
greenChannel[numCols * y + x] = inputImageRGBA[numCols * y + x].y;
blueChannel [numCols * y + x] = inputImageRGBA[numCols * y + x].z;
}
//This kernel takes in three color channels and recombines them
//into one image. The alpha channel is set to 255 to represent
//that this image has no transparency.
__global__
void recombineChannels(const unsigned char* const redChannel,
const unsigned char* const greenChannel,
const unsigned char* const blueChannel,
uchar4* const outputImageRGBA,
int numRows,
int numCols)
{
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
unsigned char red = redChannel[thread_1D_pos];
unsigned char green = greenChannel[thread_1D_pos];
unsigned char blue = blueChannel[thread_1D_pos];
//Alpha should be 255 for no transparency
uchar4 outputPixel = make_uchar4(red, green, blue, 255);
outputImageRGBA[thread_1D_pos] = outputPixel;
}
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
const float* const h_filter, const size_t filterWidth)
{
//allocate memory for the three different channels
//original
checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(cudaMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(cudaMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage));
//TODO:
//Allocate memory for the filter on the GPU
//Use the pointer d_filter that we have already declared for you
//You need to allocate memory for the filter with cudaMalloc
//be sure to use checkCudaErrors like the above examples to
//be able to tell if anything goes wrong
//IMPORTANT: Notice that we pass a pointer to a pointer to cudaMalloc
checkCudaErrors(cudaMalloc(&d_filter, sizeof(float) * filterWidth * filterWidth));
//TODO:
//Copy the filter on the host (h_filter) to the memory you just allocated
//on the GPU. cudaMemcpy(dst, src, numBytes, cudaMemcpyHostToDevice);
//Remember to use checkCudaErrors!
checkCudaErrors(cudaMemcpy(d_filter, h_filter, sizeof(float) * filterWidth * filterWidth, cudaMemcpyHostToDevice));
printf("Ender: this is the filter (%d x %d):\n\n", (int)filterWidth, (int)filterWidth);
unsigned int i, j;
for (i=0; i<filterWidth; i++) {
for (j=0; j<filterWidth; j++) {
printf("%c%f%c",j==0?'\t':' ', h_filter[filterWidth*i+j], j==filterWidth-1?'\n':' ');
}
}
printf("\n");
}
void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
unsigned char *d_redBlurred,
unsigned char *d_greenBlurred,
unsigned char *d_blueBlurred,
const int filterWidth)
{
//TODO: Set reasonable block size (i.e., number of threads per block)
const dim3 blockSize(32, 32);
//TODO:
//Compute correct grid size (i.e., number of blocks per kernel launch)
//from the image size and block size.
const dim3 gridSize(numCols/blockSize.x+1, numRows/blockSize.y+1);
printf("Ender: Image dimension - %dC x %dR\n", (int)numCols, (int)numRows);
printf("Ender: block %d x %d, grid %d x %d\n", blockSize.x, blockSize.y, gridSize.x, gridSize.y);
//TODO: Launch a kernel for separating the RGBA image into different color channels
separateChannels<<<gridSize, blockSize>>>(d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue);
// Call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
//TODO: Call your convolution kernel here 3 times, once for each color channel.
gaussian_blur<<<gridSize, blockSize>>>(d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth);
gaussian_blur<<<gridSize, blockSize>>>(d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth);
gaussian_blur<<<gridSize, blockSize>>>(d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth);
// Again, call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
// Now we recombine your results. We take care of launching this kernel for you.
//
// NOTE: This kernel launch depends on the gridSize and blockSize variables,
// which you must set yourself.
recombineChannels<<<gridSize, blockSize>>>(d_redBlurred,
d_greenBlurred,
d_blueBlurred,
d_outputImageRGBA,
numRows,
numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
//Free all the memory that we allocated
//TODO: make sure you free any arrays that you allocated
void cleanup() {
checkCudaErrors(cudaFree(d_red));
checkCudaErrors(cudaFree(d_green));
checkCudaErrors(cudaFree(d_blue));
checkCudaErrors(cudaFree(d_filter));
}
|
c789a5cd929d0a89320b53ab560d9a80a4d90981.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "ext_cuda_chunk.hpp"
#include "kernels/set_field.cuknl"
/*
* SET FIELD KERNEL
* Sets energy1 to energy0.
*/
// Entry point for the set field method.
extern "C"
void ext_set_field_kernel_(const int* chunk)
{
Chunks[*chunk-1]->SetField();
}
// Copies energy0 into energy1.
void TeaLeafCudaChunk::SetField()
{
PRE_KERNEL(0);
hipLaunchKernelGGL(( CuKnlSetField), dim3(numBlocks), dim3(BLOCK_SIZE), 0, 0,
innerX, innerY, dEnergy0, dEnergy1);
POST_KERNEL("Set Field");
}
| c789a5cd929d0a89320b53ab560d9a80a4d90981.cu | #include <stdio.h>
#include "ext_cuda_chunk.hpp"
#include "kernels/set_field.cuknl"
/*
* SET FIELD KERNEL
* Sets energy1 to energy0.
*/
// Entry point for the the set field method.
extern "C"
void ext_set_field_kernel_(const int* chunk)
{
Chunks[*chunk-1]->SetField();
}
// Copies energy0 into energy1.
void TeaLeafCudaChunk::SetField()
{
PRE_KERNEL(0);
CuKnlSetField<<<numBlocks, BLOCK_SIZE>>>(
innerX, innerY, dEnergy0, dEnergy1);
POST_KERNEL("Set Field");
}
|
43cf339b4cb18f48ea7507d0705f4ef7f2bda8eb.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <faiss/gpu/utils/MatrixMult.cuh>
#include <faiss/gpu/utils/DeviceMemory.h>
#include <faiss/gpu/utils/DeviceUtils.h>
#include <faiss/gpu/utils/Float16.cuh>
#include <faiss/gpu/utils/DeviceTensor.cuh>
#include <faiss/gpu/utils/HostTensor.cuh>
namespace faiss { namespace gpu {
template <typename T>
struct CublasGemm {
};
template <>
struct CublasGemm<float> {
static hipblasStatus_t gemm(hipblasHandle_t handle,
hipblasOperation_t transa,
hipblasOperation_t transb,
int m,
int n,
int k,
float fAlpha,
const float *A,
int lda,
const float *B,
int ldb,
float fBeta,
float *C,
int ldc) {
return hipblasSgemm(handle, transa, transb, m, n, k,
&fAlpha, A, lda, B, ldb, &fBeta, C, ldc);
}
};
template <>
struct CublasGemm<half> {
static hipblasStatus_t gemm(hipblasHandle_t handle,
hipblasOperation_t transa,
hipblasOperation_t transb,
int m,
int n,
int k,
const float fAlpha,
const half *A,
int lda,
const half *B,
int ldb,
const float fBeta,
float *C,
int ldc) {
// Always accumulate in f32
return cublasSgemmEx(handle, transa, transb, m, n, k,
&fAlpha, A, HIP_R_16F, lda,
B, HIP_R_16F, ldb,
&fBeta,
C, HIP_R_32F, ldc);
}
};
template <typename T>
void
runMatrixMult(Tensor<float, 2, true>& c, bool transC,
Tensor<T, 2, true>& a, bool transA,
Tensor<T, 2, true>& b, bool transB,
float alpha,
float beta,
hipblasHandle_t handle,
hipStream_t stream) {
hipblasSetStream(handle, stream);
// Check that we have (m x k) * (k x n) = (m x n)
// using the input row-major layout
int aM = transA ? a.getSize(1) : a.getSize(0);
int aK = transA ? a.getSize(0) : a.getSize(1);
int bK = transB ? b.getSize(1) : b.getSize(0);
int bN = transB ? b.getSize(0) : b.getSize(1);
int cM = transC ? c.getSize(1) : c.getSize(0);
int cN = transC ? c.getSize(0) : c.getSize(1);
FAISS_ASSERT(aM == cM);
FAISS_ASSERT(aK == bK);
FAISS_ASSERT(bN == cN);
FAISS_ASSERT(a.getStride(1) == 1);
FAISS_ASSERT(b.getStride(1) == 1);
FAISS_ASSERT(c.getStride(1) == 1);
// Now, we have to represent the matrix multiplication in
// column-major layout
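  // The gemm call below (standard BLAS convention) assumes column-major storage, while
  // these Tensors are row-major. A row-major matrix has the same memory layout as its
  // transpose viewed column-major, so instead of C = op(A) * op(B) we ask for
  // C' = op(B)' * op(A)' -- swapping the operands and adjusting the transpose flags --
  // which writes exactly the row-major C we want.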
T* pA = transC ? a.data() : b.data();
T* pB = transC ? b.data() : a.data();
float* pC = c.data();
int m = c.getSize(1); // stride 1 size
int n = c.getSize(0); // other size
int k = transA ? a.getSize(0) : a.getSize(1);
int lda = transC ? a.getStride(0) : b.getStride(0);
int ldb = transC ? b.getStride(0) : a.getStride(0);
int ldc = c.getStride(0);
auto gemmTrA = transB ? HIPBLAS_OP_T : HIPBLAS_OP_N;
auto gemmTrB = transA ? HIPBLAS_OP_T : HIPBLAS_OP_N;
if (transC) {
gemmTrA = transA ? HIPBLAS_OP_N : HIPBLAS_OP_T;
gemmTrB = transB ? HIPBLAS_OP_N : HIPBLAS_OP_T;
}
auto err = CublasGemm<T>::gemm(handle,
gemmTrA, gemmTrB,
m, n, k, alpha,
pA, lda, pB, ldb, beta,
pC, ldc);
FAISS_ASSERT_FMT(err == HIPBLAS_STATUS_SUCCESS,
"cublas failed (%d): "
"(%d, %d)%s x (%d, %d)%s = (%d, %d)%s",
(int) err,
a.getSize(0), a.getSize(1), transA ? "'" : "",
b.getSize(0), b.getSize(1), transB ? "'" : "",
c.getSize(0), c.getSize(1), transC ? "'" : "");
CUDA_TEST_ERROR();
}
void runMatrixMult(Tensor<float, 2, true>& c, bool transC,
Tensor<float, 2, true>& a, bool transA,
Tensor<float, 2, true>& b, bool transB,
float alpha,
float beta,
hipblasHandle_t handle,
hipStream_t stream) {
return runMatrixMult<float>(c, transC, a, transA, b, transB,
alpha, beta, handle, stream);
}
void runMatrixMult(Tensor<float, 2, true>& c, bool transC,
Tensor<half, 2, true>& a, bool transA,
Tensor<half, 2, true>& b, bool transB,
float alpha,
float beta,
hipblasHandle_t handle,
hipStream_t stream) {
return runMatrixMult<half>(c, transC, a, transA, b, transB,
alpha, beta, handle, stream);
}
void
runIteratedMatrixMult(Tensor<float, 3, true>& c, bool transC,
Tensor<float, 3, true>& a, bool transA,
Tensor<float, 3, true>& b, bool transB,
float alpha,
float beta,
hipblasHandle_t handle,
hipStream_t stream) {
FAISS_ASSERT(c.getSize(0) == a.getSize(0));
FAISS_ASSERT(a.getSize(0) == b.getSize(0));
for (int i = 0; i < a.getSize(0); ++i) {
auto cView = c[i].view();
auto aView = a[i].view();
auto bView = b[i].view();
runMatrixMult(cView, transC,
aView, transA,
bView, transB,
alpha, beta, handle, stream);
}
}
void
runBatchMatrixMult(Tensor<float, 3, true>& c, bool transC,
Tensor<float, 3, true>& a, bool transA,
Tensor<float, 3, true>& b, bool transB,
float alpha,
float beta,
DeviceMemory& mem,
hipblasHandle_t handle,
hipStream_t stream) {
FAISS_ASSERT(c.getSize(0) == a.getSize(0));
FAISS_ASSERT(a.getSize(0) == b.getSize(0));
hipblasSetStream(handle, stream);
// Check that we have (m x k) * (k x n) = (m x n)
// using the input row-major layout
int aM = transA ? a.getSize(2) : a.getSize(1);
int aK = transA ? a.getSize(1) : a.getSize(2);
int bK = transB ? b.getSize(2) : b.getSize(1);
int bN = transB ? b.getSize(1) : b.getSize(2);
int cM = transC ? c.getSize(2) : c.getSize(1);
int cN = transC ? c.getSize(1) : c.getSize(2);
FAISS_ASSERT(aM == cM);
FAISS_ASSERT(aK == bK);
FAISS_ASSERT(bN == cN);
// Now, we have to represent the matrix multiplication in
// column-major layout
float* pA = transC ? a.data() : b.data();
float* pB = transC ? b.data() : a.data();
float* pC = c.data();
int m = c.getSize(2); // stride 1 size
int n = c.getSize(1); // other size
int k = transA ? a.getSize(1) : a.getSize(2);
int lda = transC ? a.getStride(1) : b.getStride(1);
int ldb = transC ? b.getStride(1) : a.getStride(1);
int ldc = c.getStride(1);
auto gemmTrA = transB ? HIPBLAS_OP_T : HIPBLAS_OP_N;
auto gemmTrB = transA ? HIPBLAS_OP_T : HIPBLAS_OP_N;
if (transC) {
gemmTrA = transA ? HIPBLAS_OP_N : HIPBLAS_OP_T;
gemmTrB = transB ? HIPBLAS_OP_N : HIPBLAS_OP_T;
}
HostTensor<float*, 1, true> hostA({a.getSize(0)});
HostTensor<float*, 1, true> hostB({b.getSize(0)});
HostTensor<float*, 1, true> hostC({c.getSize(0)});
size_t aOffset = a.getStride(0);
size_t bOffset = b.getStride(0);
size_t cOffset = c.getStride(0);
for (int i = 0; i < a.getSize(0); ++i) {
hostA[i] = transC ? a.data() + i * aOffset : b.data() + i * bOffset;
hostB[i] = transC ? b.data() + i * bOffset : a.data() + i * aOffset;
hostC[i] = c.data() + i * cOffset;
}
DeviceTensor<float*, 1, true> deviceA(mem, hostA, stream);
DeviceTensor<float*, 1, true> deviceB(mem, hostB, stream);
DeviceTensor<float*, 1, true> deviceC(mem, hostC, stream);
auto err =
hipblasSgemmBatched(handle,
gemmTrA, gemmTrB,
m, n, k, &alpha,
(const float**) deviceA.data(), lda,
(const float**) deviceB.data(), ldb, &beta,
deviceC.data(), ldc, a.getSize(0));
FAISS_ASSERT_FMT(err == HIPBLAS_STATUS_SUCCESS,
"hipblasSgemmBatched failed (%d)", (int) err);
CUDA_TEST_ERROR();
}
} } // namespace
| 43cf339b4cb18f48ea7507d0705f4ef7f2bda8eb.cu | /**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <faiss/gpu/utils/MatrixMult.cuh>
#include <faiss/gpu/utils/DeviceMemory.h>
#include <faiss/gpu/utils/DeviceUtils.h>
#include <faiss/gpu/utils/Float16.cuh>
#include <faiss/gpu/utils/DeviceTensor.cuh>
#include <faiss/gpu/utils/HostTensor.cuh>
namespace faiss { namespace gpu {
template <typename T>
struct CublasGemm {
};
template <>
struct CublasGemm<float> {
static cublasStatus_t gemm(cublasHandle_t handle,
cublasOperation_t transa,
cublasOperation_t transb,
int m,
int n,
int k,
float fAlpha,
const float *A,
int lda,
const float *B,
int ldb,
float fBeta,
float *C,
int ldc) {
return cublasSgemm(handle, transa, transb, m, n, k,
&fAlpha, A, lda, B, ldb, &fBeta, C, ldc);
}
};
template <>
struct CublasGemm<half> {
static cublasStatus_t gemm(cublasHandle_t handle,
cublasOperation_t transa,
cublasOperation_t transb,
int m,
int n,
int k,
const float fAlpha,
const half *A,
int lda,
const half *B,
int ldb,
const float fBeta,
float *C,
int ldc) {
// Always accumulate in f32
return cublasSgemmEx(handle, transa, transb, m, n, k,
&fAlpha, A, CUDA_R_16F, lda,
B, CUDA_R_16F, ldb,
&fBeta,
C, CUDA_R_32F, ldc);
}
};
template <typename T>
void
runMatrixMult(Tensor<float, 2, true>& c, bool transC,
Tensor<T, 2, true>& a, bool transA,
Tensor<T, 2, true>& b, bool transB,
float alpha,
float beta,
cublasHandle_t handle,
cudaStream_t stream) {
cublasSetStream(handle, stream);
// Check that we have (m x k) * (k x n) = (m x n)
// using the input row-major layout
int aM = transA ? a.getSize(1) : a.getSize(0);
int aK = transA ? a.getSize(0) : a.getSize(1);
int bK = transB ? b.getSize(1) : b.getSize(0);
int bN = transB ? b.getSize(0) : b.getSize(1);
int cM = transC ? c.getSize(1) : c.getSize(0);
int cN = transC ? c.getSize(0) : c.getSize(1);
FAISS_ASSERT(aM == cM);
FAISS_ASSERT(aK == bK);
FAISS_ASSERT(bN == cN);
FAISS_ASSERT(a.getStride(1) == 1);
FAISS_ASSERT(b.getStride(1) == 1);
FAISS_ASSERT(c.getStride(1) == 1);
// Now, we have to represent the matrix multiplication in
// column-major layout
T* pA = transC ? a.data() : b.data();
T* pB = transC ? b.data() : a.data();
float* pC = c.data();
int m = c.getSize(1); // stride 1 size
int n = c.getSize(0); // other size
int k = transA ? a.getSize(0) : a.getSize(1);
int lda = transC ? a.getStride(0) : b.getStride(0);
int ldb = transC ? b.getStride(0) : a.getStride(0);
int ldc = c.getStride(0);
auto gemmTrA = transB ? CUBLAS_OP_T : CUBLAS_OP_N;
auto gemmTrB = transA ? CUBLAS_OP_T : CUBLAS_OP_N;
if (transC) {
gemmTrA = transA ? CUBLAS_OP_N : CUBLAS_OP_T;
gemmTrB = transB ? CUBLAS_OP_N : CUBLAS_OP_T;
}
auto err = CublasGemm<T>::gemm(handle,
gemmTrA, gemmTrB,
m, n, k, alpha,
pA, lda, pB, ldb, beta,
pC, ldc);
FAISS_ASSERT_FMT(err == CUBLAS_STATUS_SUCCESS,
"cublas failed (%d): "
"(%d, %d)%s x (%d, %d)%s = (%d, %d)%s",
(int) err,
a.getSize(0), a.getSize(1), transA ? "'" : "",
b.getSize(0), b.getSize(1), transB ? "'" : "",
c.getSize(0), c.getSize(1), transC ? "'" : "");
CUDA_TEST_ERROR();
}
void runMatrixMult(Tensor<float, 2, true>& c, bool transC,
Tensor<float, 2, true>& a, bool transA,
Tensor<float, 2, true>& b, bool transB,
float alpha,
float beta,
cublasHandle_t handle,
cudaStream_t stream) {
return runMatrixMult<float>(c, transC, a, transA, b, transB,
alpha, beta, handle, stream);
}
void runMatrixMult(Tensor<float, 2, true>& c, bool transC,
Tensor<half, 2, true>& a, bool transA,
Tensor<half, 2, true>& b, bool transB,
float alpha,
float beta,
cublasHandle_t handle,
cudaStream_t stream) {
return runMatrixMult<half>(c, transC, a, transA, b, transB,
alpha, beta, handle, stream);
}
void
runIteratedMatrixMult(Tensor<float, 3, true>& c, bool transC,
Tensor<float, 3, true>& a, bool transA,
Tensor<float, 3, true>& b, bool transB,
float alpha,
float beta,
cublasHandle_t handle,
cudaStream_t stream) {
FAISS_ASSERT(c.getSize(0) == a.getSize(0));
FAISS_ASSERT(a.getSize(0) == b.getSize(0));
for (int i = 0; i < a.getSize(0); ++i) {
auto cView = c[i].view();
auto aView = a[i].view();
auto bView = b[i].view();
runMatrixMult(cView, transC,
aView, transA,
bView, transB,
alpha, beta, handle, stream);
}
}
void
runBatchMatrixMult(Tensor<float, 3, true>& c, bool transC,
Tensor<float, 3, true>& a, bool transA,
Tensor<float, 3, true>& b, bool transB,
float alpha,
float beta,
DeviceMemory& mem,
cublasHandle_t handle,
cudaStream_t stream) {
FAISS_ASSERT(c.getSize(0) == a.getSize(0));
FAISS_ASSERT(a.getSize(0) == b.getSize(0));
cublasSetStream(handle, stream);
// Check that we have (m x k) * (k x n) = (m x n)
// using the input row-major layout
int aM = transA ? a.getSize(2) : a.getSize(1);
int aK = transA ? a.getSize(1) : a.getSize(2);
int bK = transB ? b.getSize(2) : b.getSize(1);
int bN = transB ? b.getSize(1) : b.getSize(2);
int cM = transC ? c.getSize(2) : c.getSize(1);
int cN = transC ? c.getSize(1) : c.getSize(2);
FAISS_ASSERT(aM == cM);
FAISS_ASSERT(aK == bK);
FAISS_ASSERT(bN == cN);
// Now, we have to represent the matrix multiplication in
// column-major layout
float* pA = transC ? a.data() : b.data();
float* pB = transC ? b.data() : a.data();
float* pC = c.data();
int m = c.getSize(2); // stride 1 size
int n = c.getSize(1); // other size
int k = transA ? a.getSize(1) : a.getSize(2);
int lda = transC ? a.getStride(1) : b.getStride(1);
int ldb = transC ? b.getStride(1) : a.getStride(1);
int ldc = c.getStride(1);
auto gemmTrA = transB ? CUBLAS_OP_T : CUBLAS_OP_N;
auto gemmTrB = transA ? CUBLAS_OP_T : CUBLAS_OP_N;
if (transC) {
gemmTrA = transA ? CUBLAS_OP_N : CUBLAS_OP_T;
gemmTrB = transB ? CUBLAS_OP_N : CUBLAS_OP_T;
}
HostTensor<float*, 1, true> hostA({a.getSize(0)});
HostTensor<float*, 1, true> hostB({b.getSize(0)});
HostTensor<float*, 1, true> hostC({c.getSize(0)});
size_t aOffset = a.getStride(0);
size_t bOffset = b.getStride(0);
size_t cOffset = c.getStride(0);
for (int i = 0; i < a.getSize(0); ++i) {
hostA[i] = transC ? a.data() + i * aOffset : b.data() + i * bOffset;
hostB[i] = transC ? b.data() + i * bOffset : a.data() + i * aOffset;
hostC[i] = c.data() + i * cOffset;
}
DeviceTensor<float*, 1, true> deviceA(mem, hostA, stream);
DeviceTensor<float*, 1, true> deviceB(mem, hostB, stream);
DeviceTensor<float*, 1, true> deviceC(mem, hostC, stream);
auto err =
cublasSgemmBatched(handle,
gemmTrA, gemmTrB,
m, n, k, &alpha,
(const float**) deviceA.data(), lda,
(const float**) deviceB.data(), ldb, &beta,
deviceC.data(), ldc, a.getSize(0));
FAISS_ASSERT_FMT(err == CUBLAS_STATUS_SUCCESS,
"cublasSgemmBatched failed (%d)", (int) err);
CUDA_TEST_ERROR();
}
} } // namespace
|
555c271e318c121828c824e72e42582a1cf5b8d6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "reduce.h"
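// These device-side hooks are presumably composed by the transform() helper from
// reduce.h (not shown here): op() squares each element, update()/merge() accumulate the
// partial sums, and postProcess() takes the square root of the total -- i.e. the
// strided Euclidean norm sqrt(sum_i dx[i]^2).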
__device__ float merge(float old,float opOutput,float *extraParams) {
return opOutput + old;
}
__device__ float update(float old,float opOutput,float *extraParams) {
return opOutput + old;
}
__device__ float op(float d1,float *extraParams) {
return powf(d1,2);
}
__device__ float postProcess(float reduction,int n,int xOffset,float *dx,int incx,float *params,float *result) {
return sqrtf(reduction);
}
extern "C"
__global__ void norm2_strided_float(int n, int xOffset,float *dx,int incx,float *params,float *result) {
transform(n,xOffset,dx,incx,params,result);
}
| 555c271e318c121828c824e72e42582a1cf5b8d6.cu | #include "reduce.h"
__device__ float merge(float old,float opOutput,float *extraParams) {
return opOutput + old;
}
__device__ float update(float old,float opOutput,float *extraParams) {
return opOutput + old;
}
__device__ float op(float d1,float *extraParams) {
return powf(d1,2);
}
__device__ float postProcess(float reduction,int n,int xOffset,float *dx,int incx,float *params,float *result) {
return sqrtf(reduction);
}
extern "C"
__global__ void norm2_strided_float(int n, int xOffset,float *dx,int incx,float *params,float *result) {
transform(n,xOffset,dx,incx,params,result);
}
|
26d054da9b7c495c7e472c2149ad4986f7a3fb38.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "tsdf.h"
__global__ void fusion_gpu(Volume vol, Views views, float truncation_distance)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int vox_res = vol.vol_dim_w * vol.vol_dim_h * vol.vol_dim_d;
for(int vox_idx = index; vox_idx < vox_res; vox_idx += stride)
{
int i, j, k;
idx2ijk(vox_idx, vol.vol_dim_w, vol.vol_dim_h, vol.vol_dim_d, i, j, k);
float x, y, z;
ijk2xyz(i, j, k, vol.voxel_size, vol.origin_x, vol.origin_y, vol.origin_z, x, y, z);
//printf("idx=%d, i=%d, j=%d, k=%d, x=%f, y=%f, z=%f\n",vox_idx, i, j, k, x, y, z);
//printf("idx=%d, vidx=%d\n", vox_idx, ((vol.vol_dim + k) * vol.vol_dim + j) * vol.vol_dim + i);
bool vol_idx_updated = false;
for (int idx = 0; idx < views.n_views; ++idx)
{
float _u, _v, _z;
xyz2uv(idx, &views, x, y, z, _u, _v, _z);
int u, v;
u = int(_u + 0.5);
v = int(_v + 0.5);
//if(k==103 && j==130 && i==153) printf("u=%d, v=%d, ur=%f, vr=%f\n", u,v, _u,_v);
//printf("Debug: %d %d %d %f %f %f %f %f %f %d %d\n", i, j, k, x, y, z, _u, _v, _d, u, v);
//printf("%d %d\n", views.rows, views.cols);
if (u >= 0 && u < views.cols && v >= 0 && v < views.rows) {
int depth_idx = (idx * views.rows + v) * views.cols + u;
float depth = views.depth[depth_idx];
float weight = views.weight[depth_idx];
float sdf = depth - _z;
//if(k==103 && j==130 && i==153) printf(" dm_d=%f, dm_idx=%d, u=%d, v=%d, ur=%f, vr=%f\n", depth, depth_idx, u,v, _u, _v);
//printf("ss %f %f\n", depth_diff, truncated_depth);
if (depth > 0 && sdf >= -truncation_distance) {
float tsdf = fminf(1, fmaxf(-1, sdf / truncation_distance));
// add to volume
float new_weight = vol.weight[vox_idx] + weight;
float new_value = (vol.data[vox_idx] * vol.weight[vox_idx] + tsdf * weight) / new_weight;
vol.data[vox_idx] = new_value;
vol.weight[vox_idx] = new_weight;
vol_idx_updated = true;
}
}
}
if (vol_idx_updated == false) {
vol.data[vox_idx] = -1;
vol.weight[vox_idx] = 0;
}
}
}
void fusion(Volume &vol, Views &views, float truncation_distance)
{
/**
* Allocate memory for GPU
*/
Volume vol_gpu;
Views views_gpu;
mem_alloc_views_gpu(views_gpu, views);
mem_alloc_volume_gpu(vol_gpu, vol);
hipLaunchKernelGGL(( fusion_gpu), dim3(getNumBlock(vol_gpu.vol_dim_w * vol_gpu.vol_dim_h * vol_gpu.vol_dim_d, THREADS_PER_BLOCK)), dim3(THREADS_PER_BLOCK), 0, 0, vol_gpu, views_gpu, truncation_distance);
//fusion_gpu<<<1, 1>>>(vol_gpu, views_gpu, truncation_distance);
hipDeviceSynchronize();
mem_alloc_volume_cpu(vol_gpu, vol);
mem_free_views_gpu(views_gpu);
mem_free_volume_gpu(vol_gpu);
} | 26d054da9b7c495c7e472c2149ad4986f7a3fb38.cu | #include "tsdf.h"
__global__ void fusion_gpu(Volume vol, Views views, float truncation_distance)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int vox_res = vol.vol_dim_w * vol.vol_dim_h * vol.vol_dim_d;
for(int vox_idx = index; vox_idx < vox_res; vox_idx += stride)
{
int i, j, k;
idx2ijk(vox_idx, vol.vol_dim_w, vol.vol_dim_h, vol.vol_dim_d, i, j, k);
float x, y, z;
ijk2xyz(i, j, k, vol.voxel_size, vol.origin_x, vol.origin_y, vol.origin_z, x, y, z);
//printf("idx=%d, i=%d, j=%d, k=%d, x=%f, y=%f, z=%f\n",vox_idx, i, j, k, x, y, z);
//printf("idx=%d, vidx=%d\n", vox_idx, ((vol.vol_dim + k) * vol.vol_dim + j) * vol.vol_dim + i);
bool vol_idx_updated = false;
for (int idx = 0; idx < views.n_views; ++idx)
{
float _u, _v, _z;
xyz2uv(idx, &views, x, y, z, _u, _v, _z);
int u, v;
u = int(_u + 0.5);
v = int(_v + 0.5);
//if(k==103 && j==130 && i==153) printf("u=%d, v=%d, ur=%f, vr=%f\n", u,v, _u,_v);
//printf("Debug: %d %d %d %f %f %f %f %f %f %d %d\n", i, j, k, x, y, z, _u, _v, _d, u, v);
//printf("%d %d\n", views.rows, views.cols);
if (u >= 0 && u < views.cols && v >= 0 && v < views.rows) {
int depth_idx = (idx * views.rows + v) * views.cols + u;
float depth = views.depth[depth_idx];
float weight = views.weight[depth_idx];
float sdf = depth - _z;
//if(k==103 && j==130 && i==153) printf(" dm_d=%f, dm_idx=%d, u=%d, v=%d, ur=%f, vr=%f\n", depth, depth_idx, u,v, _u, _v);
//printf("ss %f %f\n", depth_diff, truncated_depth);
if (depth > 0 && sdf >= -truncation_distance) {
float tsdf = fminf(1, fmaxf(-1, sdf / truncation_distance));
// add to volume
float new_weight = vol.weight[vox_idx] + weight;
float new_value = (vol.data[vox_idx] * vol.weight[vox_idx] + tsdf * weight) / new_weight;
vol.data[vox_idx] = new_value;
vol.weight[vox_idx] = new_weight;
vol_idx_updated = true;
}
}
}
if (vol_idx_updated == false) {
vol.data[vox_idx] = -1;
vol.weight[vox_idx] = 0;
}
}
}
void fusion(Volume &vol, Views &views, float truncation_distance)
{
/**
* Allocate memory for GPU
*/
Volume vol_gpu;
Views views_gpu;
mem_alloc_views_gpu(views_gpu, views);
mem_alloc_volume_gpu(vol_gpu, vol);
fusion_gpu<<<getNumBlock(vol_gpu.vol_dim_w * vol_gpu.vol_dim_h * vol_gpu.vol_dim_d, THREADS_PER_BLOCK), THREADS_PER_BLOCK>>>(vol_gpu, views_gpu, truncation_distance);
//fusion_gpu<<<1, 1>>>(vol_gpu, views_gpu, truncation_distance);
cudaDeviceSynchronize();
mem_alloc_volume_cpu(vol_gpu, vol);
mem_free_views_gpu(views_gpu);
mem_free_volume_gpu(vol_gpu);
} |
ed7dd44d0e1fa62152d867cda6a072959240b6f1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/softmax_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void SoftmaxLossForwardGPU(const int nthreads,
const Dtype* prob_data, const Dtype* label, Dtype* loss,
const int num, const int dim, const int spatial_dim,
const bool has_ignore_label_, const int ignore_label_,
Dtype* counts,
bool use_label_smooth, float label_smooth_factor, int num_classes) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
loss[index] = 0;
counts[index] = 0;
} else {
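// Label smoothing: weight each class by (1 - label_smooth_factor) for the
// ground-truth class and label_smooth_factor / num_classes otherwise, and
// accumulate -coeff * log(p_c) over all classes instead of -log(p_label).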
if (use_label_smooth && (label_smooth_factor > 0.0F)) {
for (int c = 0; c < num_classes; c++) {
float coeff = (c == label_value) ? (1.0F - label_smooth_factor)
: (label_smooth_factor / float(num_classes));
loss[index] -= coeff * log(max(prob_data[n * dim + c * spatial_dim + s], Dtype(FLT_MIN)));
}
} else {
loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s],
Dtype(FLT_MIN)));
}
counts[index] = 1;
}
}
}
template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
const Dtype* prob_data = prob_.gpu_data();
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is not used for anything, we use it here to avoid having
// to allocate new GPU memory to accumulate intermediate results.
Dtype* loss_data = bottom[0]->mutable_gpu_diff();
// Similarly, this memory is never used elsewhere, and thus we can use it
// to avoid having to allocate additional GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( SoftmaxLossForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, prob_data, label, loss_data,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts,
use_label_smooth_, label_smooth_factor_, num_classes_);
Dtype loss;
caffe_gpu_asum(nthreads, loss_data, &loss);
Dtype valid_count = -1;
// Only launch another CUDA kernel if we actually need the count of valid
// outputs.
if (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) {
caffe_gpu_asum(nthreads, counts, &valid_count);
}
top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_,
valid_count);
if (top.size() == 2) {
top[1]->ShareData(prob_);
}
// Clear scratch memory to prevent interfering with backward (see #6202).
caffe_gpu_set(bottom[0]->count(), Dtype(0), bottom[0]->mutable_gpu_diff());
}
template <typename Dtype>
__global__ void SoftmaxLossBackwardGPU(const int nthreads, const Dtype* top,
const Dtype* label, Dtype* bottom_diff, const int num, const int dim,
const int spatial_dim, const bool has_ignore_label_,
const int ignore_label_, Dtype* counts,
bool use_label_smooth, float label_smooth_factor, int num_classes) {
const int channels = dim / spatial_dim;
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
for (int c = 0; c < channels; ++c) {
bottom_diff[n * dim + c * spatial_dim + s] = 0;
}
counts[index] = 0;
} else {
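// bottom_diff was pre-filled with the softmax probabilities, so subtracting
// the (optionally smoothed) one-hot target leaves the cross-entropy gradient
// p - q with respect to the logits.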
if (use_label_smooth && label_smooth_factor > 0.0F) {
for (int c = 0; c < num_classes; ++c) {
float coeff = (c == label_value) ? (1.0F - label_smooth_factor)
: (label_smooth_factor / float(num_classes));
bottom_diff[n * dim + c * spatial_dim + s] -= coeff;
}
} else {
bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;
}
counts[index] = 1;
}
}
}
template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* prob_data = prob_.gpu_data();
const Dtype* top_data = top[0]->gpu_data();
caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff);
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is never used for anything else,
// we use to to avoid allocating new GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( SoftmaxLossBackwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, top_data, label, bottom_diff,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts,
use_label_smooth_, label_smooth_factor_, num_classes_);
Dtype valid_count = -1;
// Only launch another CUDA kernel if we actually need the count of valid
// outputs.
if (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) {
caffe_gpu_asum(nthreads, counts, &valid_count);
}
const Dtype loss_weight = top[0]->cpu_diff()[0] /
get_normalizer(normalization_, valid_count);
caffe_gpu_scal(prob_.count(), loss_weight , bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxWithLossLayer);
} // namespace caffe
| ed7dd44d0e1fa62152d867cda6a072959240b6f1.cu | #include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/softmax_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void SoftmaxLossForwardGPU(const int nthreads,
const Dtype* prob_data, const Dtype* label, Dtype* loss,
const int num, const int dim, const int spatial_dim,
const bool has_ignore_label_, const int ignore_label_,
Dtype* counts,
bool use_label_smooth, float label_smooth_factor, int num_classes) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
loss[index] = 0;
counts[index] = 0;
} else {
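// Label smoothing: weight each class by (1 - label_smooth_factor) for the
// ground-truth class and label_smooth_factor / num_classes otherwise, and
// accumulate -coeff * log(p_c) over all classes instead of -log(p_label).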
if (use_label_smooth && (label_smooth_factor > 0.0F)) {
for (int c = 0; c < num_classes; c++) {
float coeff = (c == label_value) ? (1.0F - label_smooth_factor)
: (label_smooth_factor / float(num_classes));
loss[index] -= coeff * log(max(prob_data[n * dim + c * spatial_dim + s], Dtype(FLT_MIN)));
}
} else {
loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s],
Dtype(FLT_MIN)));
}
counts[index] = 1;
}
}
}
template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
const Dtype* prob_data = prob_.gpu_data();
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is not used for anything, we use it here to avoid having
// to allocate new GPU memory to accumulate intermediate results.
Dtype* loss_data = bottom[0]->mutable_gpu_diff();
// Similarly, this memory is never used elsewhere, and thus we can use it
// to avoid having to allocate additional GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
SoftmaxLossForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, prob_data, label, loss_data,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts,
use_label_smooth_, label_smooth_factor_, num_classes_);
Dtype loss;
caffe_gpu_asum(nthreads, loss_data, &loss);
Dtype valid_count = -1;
// Only launch another CUDA kernel if we actually need the count of valid
// outputs.
if (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) {
caffe_gpu_asum(nthreads, counts, &valid_count);
}
top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_,
valid_count);
if (top.size() == 2) {
top[1]->ShareData(prob_);
}
// Clear scratch memory to prevent interfering with backward (see #6202).
caffe_gpu_set(bottom[0]->count(), Dtype(0), bottom[0]->mutable_gpu_diff());
}
template <typename Dtype>
__global__ void SoftmaxLossBackwardGPU(const int nthreads, const Dtype* top,
const Dtype* label, Dtype* bottom_diff, const int num, const int dim,
const int spatial_dim, const bool has_ignore_label_,
const int ignore_label_, Dtype* counts,
bool use_label_smooth, float label_smooth_factor, int num_classes) {
const int channels = dim / spatial_dim;
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
for (int c = 0; c < channels; ++c) {
bottom_diff[n * dim + c * spatial_dim + s] = 0;
}
counts[index] = 0;
} else {
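// bottom_diff was pre-filled with the softmax probabilities, so subtracting
// the (optionally smoothed) one-hot target leaves the cross-entropy gradient
// p - q with respect to the logits.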
if (use_label_smooth && label_smooth_factor > 0.0F) {
for (int c = 0; c < num_classes; ++c) {
float coeff = (c == label_value) ? (1.0F - label_smooth_factor)
: (label_smooth_factor / float(num_classes));
bottom_diff[n * dim + c * spatial_dim + s] -= coeff;
}
} else {
bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;
}
counts[index] = 1;
}
}
}
template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* prob_data = prob_.gpu_data();
const Dtype* top_data = top[0]->gpu_data();
caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff);
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is never used for anything else,
// we use to to avoid allocating new GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
SoftmaxLossBackwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, top_data, label, bottom_diff,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts,
use_label_smooth_, label_smooth_factor_, num_classes_);
Dtype valid_count = -1;
// Only launch another CUDA kernel if we actually need the count of valid
// outputs.
if (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) {
caffe_gpu_asum(nthreads, counts, &valid_count);
}
const Dtype loss_weight = top[0]->cpu_diff()[0] /
get_normalizer(normalization_, valid_count);
caffe_gpu_scal(prob_.count(), loss_weight , bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxWithLossLayer);
} // namespace caffe
|
segmentSieve.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ static void segmentSieve(char *primes, uint64_t max) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if(index>0){
const uint64_t maxRoot = sqrt((double)max);
int low = maxRoot*index;
int high = low + maxRoot;
if(high > max) high = max;
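// Thread `index` sieves the segment [low, high) by crossing off multiples of
// every base value i < sqrt(max) that is still unmarked. Thread 0 does nothing,
// so the base segment [0, sqrt(max)) is presumably sieved elsewhere (e.g. on
// the host) -- that is an assumption, not something visible in this kernel.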
for (int i = 2; i < maxRoot; i++){ //sqrt(n)lglg(sqrt(n))
if(primes[i]==0){
int loLim = (low / i) * i;
if (loLim < low)
loLim += i;
for (int j=loLim; j<high; j+=i)
primes[j] = 1;
}
}
}
} | segmentSieve.cu | #include "includes.h"
__global__ static void segmentSieve(char *primes, uint64_t max) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if(index>0){
const uint64_t maxRoot = sqrt((double)max);
int low = maxRoot*index;
int high = low + maxRoot;
if(high > max) high = max;
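// Thread `index` sieves the segment [low, high) by crossing off multiples of
// every base value i < sqrt(max) that is still unmarked. Thread 0 does nothing,
// so the base segment [0, sqrt(max)) is presumably sieved elsewhere (e.g. on
// the host) -- that is an assumption, not something visible in this kernel.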
for (int i = 2; i < maxRoot; i++){ //sqrt(n)lglg(sqrt(n))
if(primes[i]==0){
int loLim = (low / i) * i;
if (loLim < low)
loLim += i;
for (int j=loLim; j<high; j+=i)
primes[j] = 1;
}
}
}
} |
cf0edb9cbd73dc5e8a61317f145d32bcdb7a55d2.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2017 XGBoost contributors
*/
#include <dmlc/logging.h>
#include <dmlc/filesystem.h>
#include <xgboost/c_api.h>
#include <xgboost/predictor.h>
#include <string>
#include "gtest/gtest.h"
#include "../helpers.h"
namespace {
inline void CheckCAPICall(int ret) {
ASSERT_EQ(ret, 0) << XGBGetLastError();
}
} // namespace anonymous
extern const std::map<std::string, std::string>&
QueryBoosterConfigurationArguments(BoosterHandle handle);
namespace xgboost {
namespace predictor {
TEST(gpu_predictor, Test) {
std::unique_ptr<Predictor> gpu_predictor =
std::unique_ptr<Predictor>(Predictor::Create("gpu_predictor"));
std::unique_ptr<Predictor> cpu_predictor =
std::unique_ptr<Predictor>(Predictor::Create("cpu_predictor"));
gpu_predictor->Init({}, {});
cpu_predictor->Init({}, {});
gbm::GBTreeModel model = CreateTestModel();
int n_row = 5;
int n_col = 5;
auto dmat = CreateDMatrix(n_row, n_col, 0);
// Test predict batch
HostDeviceVector<float> gpu_out_predictions;
HostDeviceVector<float> cpu_out_predictions;
gpu_predictor->PredictBatch((*dmat).get(), &gpu_out_predictions, model, 0);
cpu_predictor->PredictBatch((*dmat).get(), &cpu_out_predictions, model, 0);
std::vector<float>& gpu_out_predictions_h = gpu_out_predictions.HostVector();
std::vector<float>& cpu_out_predictions_h = cpu_out_predictions.HostVector();
float abs_tolerance = 0.001;
for (int i = 0; i < gpu_out_predictions.Size(); i++) {
ASSERT_NEAR(gpu_out_predictions_h[i], cpu_out_predictions_h[i], abs_tolerance);
}
// Test predict instance
const auto &batch = *(*dmat)->GetRowBatches().begin();
for (int i = 0; i < batch.Size(); i++) {
std::vector<float> gpu_instance_out_predictions;
std::vector<float> cpu_instance_out_predictions;
cpu_predictor->PredictInstance(batch[i], &cpu_instance_out_predictions,
model);
gpu_predictor->PredictInstance(batch[i], &gpu_instance_out_predictions,
model);
ASSERT_EQ(gpu_instance_out_predictions[0], cpu_instance_out_predictions[0]);
}
// Test predict leaf
std::vector<float> gpu_leaf_out_predictions;
std::vector<float> cpu_leaf_out_predictions;
cpu_predictor->PredictLeaf((*dmat).get(), &cpu_leaf_out_predictions, model);
gpu_predictor->PredictLeaf((*dmat).get(), &gpu_leaf_out_predictions, model);
for (int i = 0; i < gpu_leaf_out_predictions.size(); i++) {
ASSERT_EQ(gpu_leaf_out_predictions[i], cpu_leaf_out_predictions[i]);
}
// Test predict contribution
std::vector<float> gpu_out_contribution;
std::vector<float> cpu_out_contribution;
cpu_predictor->PredictContribution((*dmat).get(), &cpu_out_contribution, model);
gpu_predictor->PredictContribution((*dmat).get(), &gpu_out_contribution, model);
for (int i = 0; i < gpu_out_contribution.size(); i++) {
ASSERT_EQ(gpu_out_contribution[i], cpu_out_contribution[i]);
}
delete dmat;
}
TEST(gpu_predictor, ExternalMemoryTest) {
std::unique_ptr<Predictor> gpu_predictor =
std::unique_ptr<Predictor>(Predictor::Create("gpu_predictor"));
gpu_predictor->Init({}, {});
gbm::GBTreeModel model = CreateTestModel();
std::unique_ptr<DMatrix> dmat = CreateSparsePageDMatrix(32, 64);
// Test predict batch
HostDeviceVector<float> out_predictions;
gpu_predictor->PredictBatch(dmat.get(), &out_predictions, model, 0);
EXPECT_EQ(out_predictions.Size(), dmat->Info().num_row_);
for (const auto& v : out_predictions.HostVector()) {
ASSERT_EQ(v, 1.5);
}
// Test predict leaf
std::vector<float> leaf_out_predictions;
gpu_predictor->PredictLeaf(dmat.get(), &leaf_out_predictions, model);
EXPECT_EQ(leaf_out_predictions.size(), dmat->Info().num_row_);
for (const auto& v : leaf_out_predictions) {
ASSERT_EQ(v, 0);
}
// Test predict contribution
std::vector<float> out_contribution;
gpu_predictor->PredictContribution(dmat.get(), &out_contribution, model);
EXPECT_EQ(out_contribution.size(), dmat->Info().num_row_);
for (const auto& v : out_contribution) {
ASSERT_EQ(v, 1.5);
}
// Test predict contribution (approximate method)
std::vector<float> out_contribution_approximate;
gpu_predictor->PredictContribution(dmat.get(), &out_contribution_approximate, model, true);
EXPECT_EQ(out_contribution_approximate.size(), dmat->Info().num_row_);
for (const auto& v : out_contribution_approximate) {
ASSERT_EQ(v, 1.5);
}
}
#if defined(XGBOOST_USE_NCCL)
// Test whether pickling preserves predictor parameters
TEST(gpu_predictor, MGPU_PicklingTest) {
int ngpu;
dh::safe_cuda(hipGetDeviceCount(&ngpu));
dmlc::TemporaryDirectory tempdir;
const std::string tmp_file = tempdir.path + "/simple.libsvm";
CreateBigTestData(tmp_file, 600);
DMatrixHandle dmat[1];
BoosterHandle bst, bst2;
std::vector<bst_float> label;
for (int i = 0; i < 200; ++i) {
label.push_back((i % 2 ? 1 : 0));
}
// Load data matrix
CheckCAPICall(XGDMatrixCreateFromFile(tmp_file.c_str(), 0, &dmat[0]));
CheckCAPICall(XGDMatrixSetFloatInfo(dmat[0], "label", label.data(), 200));
// Create booster
CheckCAPICall(XGBoosterCreate(dmat, 1, &bst));
// Set parameters
CheckCAPICall(XGBoosterSetParam(bst, "seed", "0"));
CheckCAPICall(XGBoosterSetParam(bst, "base_score", "0.5"));
CheckCAPICall(XGBoosterSetParam(bst, "booster", "gbtree"));
CheckCAPICall(XGBoosterSetParam(bst, "learning_rate", "0.01"));
CheckCAPICall(XGBoosterSetParam(bst, "max_depth", "8"));
CheckCAPICall(XGBoosterSetParam(bst, "objective", "binary:logistic"));
CheckCAPICall(XGBoosterSetParam(bst, "seed", "123"));
CheckCAPICall(XGBoosterSetParam(bst, "tree_method", "gpu_hist"));
CheckCAPICall(XGBoosterSetParam(bst, "n_gpus", std::to_string(ngpu).c_str()));
CheckCAPICall(XGBoosterSetParam(bst, "predictor", "gpu_predictor"));
// Run boosting iterations
for (int i = 0; i < 10; ++i) {
CheckCAPICall(XGBoosterUpdateOneIter(bst, i, dmat[0]));
}
// Delete matrix
CheckCAPICall(XGDMatrixFree(dmat[0]));
// Pickle
const char* dptr;
bst_ulong len;
std::string buf;
CheckCAPICall(XGBoosterGetModelRaw(bst, &len, &dptr));
buf = std::string(dptr, len);
CheckCAPICall(XGBoosterFree(bst));
// Unpickle
CheckCAPICall(XGBoosterCreate(nullptr, 0, &bst2));
CheckCAPICall(XGBoosterLoadModelFromBuffer(bst2, buf.c_str(), len));
{ // Query predictor
const auto& kwargs = QueryBoosterConfigurationArguments(bst2);
ASSERT_EQ(kwargs.at("predictor"), "gpu_predictor");
ASSERT_EQ(kwargs.at("n_gpus"), std::to_string(ngpu).c_str());
}
{ // Change n_gpus and query again
CheckCAPICall(XGBoosterSetParam(bst2, "n_gpus", "1"));
const auto& kwargs = QueryBoosterConfigurationArguments(bst2);
ASSERT_EQ(kwargs.at("n_gpus"), "1");
}
{ // Change predictor and query again
CheckCAPICall(XGBoosterSetParam(bst2, "predictor", "cpu_predictor"));
const auto& kwargs = QueryBoosterConfigurationArguments(bst2);
ASSERT_EQ(kwargs.at("predictor"), "cpu_predictor");
}
CheckCAPICall(XGBoosterFree(bst2));
}
#endif // defined(XGBOOST_USE_NCCL)
#if defined(XGBOOST_USE_NCCL)
// multi-GPU predictor test
TEST(gpu_predictor, MGPU_Test) {
std::unique_ptr<Predictor> gpu_predictor =
std::unique_ptr<Predictor>(Predictor::Create("gpu_predictor"));
std::unique_ptr<Predictor> cpu_predictor =
std::unique_ptr<Predictor>(Predictor::Create("cpu_predictor"));
gpu_predictor->Init({std::pair<std::string, std::string>("n_gpus", "-1")}, {});
cpu_predictor->Init({}, {});
for (size_t i = 1; i < 33; i *= 2) {
int n_row = i, n_col = i;
auto dmat = CreateDMatrix(n_row, n_col, 0);
gbm::GBTreeModel model = CreateTestModel();
// Test predict batch
HostDeviceVector<float> gpu_out_predictions;
HostDeviceVector<float> cpu_out_predictions;
gpu_predictor->PredictBatch((*dmat).get(), &gpu_out_predictions, model, 0);
cpu_predictor->PredictBatch((*dmat).get(), &cpu_out_predictions, model, 0);
std::vector<float>& gpu_out_predictions_h = gpu_out_predictions.HostVector();
std::vector<float>& cpu_out_predictions_h = cpu_out_predictions.HostVector();
float abs_tolerance = 0.001;
for (int j = 0; j < gpu_out_predictions.Size(); j++) {
ASSERT_NEAR(gpu_out_predictions_h[j], cpu_out_predictions_h[j], abs_tolerance);
}
delete dmat;
}
}
#endif // defined(XGBOOST_USE_NCCL)
} // namespace predictor
} // namespace xgboost
| cf0edb9cbd73dc5e8a61317f145d32bcdb7a55d2.cu |
/*!
* Copyright 2017 XGBoost contributors
*/
#include <dmlc/logging.h>
#include <dmlc/filesystem.h>
#include <xgboost/c_api.h>
#include <xgboost/predictor.h>
#include <string>
#include "gtest/gtest.h"
#include "../helpers.h"
namespace {
inline void CheckCAPICall(int ret) {
ASSERT_EQ(ret, 0) << XGBGetLastError();
}
} // namespace anonymous
extern const std::map<std::string, std::string>&
QueryBoosterConfigurationArguments(BoosterHandle handle);
namespace xgboost {
namespace predictor {
TEST(gpu_predictor, Test) {
std::unique_ptr<Predictor> gpu_predictor =
std::unique_ptr<Predictor>(Predictor::Create("gpu_predictor"));
std::unique_ptr<Predictor> cpu_predictor =
std::unique_ptr<Predictor>(Predictor::Create("cpu_predictor"));
gpu_predictor->Init({}, {});
cpu_predictor->Init({}, {});
gbm::GBTreeModel model = CreateTestModel();
int n_row = 5;
int n_col = 5;
auto dmat = CreateDMatrix(n_row, n_col, 0);
// Test predict batch
HostDeviceVector<float> gpu_out_predictions;
HostDeviceVector<float> cpu_out_predictions;
gpu_predictor->PredictBatch((*dmat).get(), &gpu_out_predictions, model, 0);
cpu_predictor->PredictBatch((*dmat).get(), &cpu_out_predictions, model, 0);
std::vector<float>& gpu_out_predictions_h = gpu_out_predictions.HostVector();
std::vector<float>& cpu_out_predictions_h = cpu_out_predictions.HostVector();
float abs_tolerance = 0.001;
for (int i = 0; i < gpu_out_predictions.Size(); i++) {
ASSERT_NEAR(gpu_out_predictions_h[i], cpu_out_predictions_h[i], abs_tolerance);
}
// Test predict instance
const auto &batch = *(*dmat)->GetRowBatches().begin();
for (int i = 0; i < batch.Size(); i++) {
std::vector<float> gpu_instance_out_predictions;
std::vector<float> cpu_instance_out_predictions;
cpu_predictor->PredictInstance(batch[i], &cpu_instance_out_predictions,
model);
gpu_predictor->PredictInstance(batch[i], &gpu_instance_out_predictions,
model);
ASSERT_EQ(gpu_instance_out_predictions[0], cpu_instance_out_predictions[0]);
}
// Test predict leaf
std::vector<float> gpu_leaf_out_predictions;
std::vector<float> cpu_leaf_out_predictions;
cpu_predictor->PredictLeaf((*dmat).get(), &cpu_leaf_out_predictions, model);
gpu_predictor->PredictLeaf((*dmat).get(), &gpu_leaf_out_predictions, model);
for (int i = 0; i < gpu_leaf_out_predictions.size(); i++) {
ASSERT_EQ(gpu_leaf_out_predictions[i], cpu_leaf_out_predictions[i]);
}
// Test predict contribution
std::vector<float> gpu_out_contribution;
std::vector<float> cpu_out_contribution;
cpu_predictor->PredictContribution((*dmat).get(), &cpu_out_contribution, model);
gpu_predictor->PredictContribution((*dmat).get(), &gpu_out_contribution, model);
for (int i = 0; i < gpu_out_contribution.size(); i++) {
ASSERT_EQ(gpu_out_contribution[i], cpu_out_contribution[i]);
}
delete dmat;
}
TEST(gpu_predictor, ExternalMemoryTest) {
std::unique_ptr<Predictor> gpu_predictor =
std::unique_ptr<Predictor>(Predictor::Create("gpu_predictor"));
gpu_predictor->Init({}, {});
gbm::GBTreeModel model = CreateTestModel();
std::unique_ptr<DMatrix> dmat = CreateSparsePageDMatrix(32, 64);
// Test predict batch
HostDeviceVector<float> out_predictions;
gpu_predictor->PredictBatch(dmat.get(), &out_predictions, model, 0);
EXPECT_EQ(out_predictions.Size(), dmat->Info().num_row_);
for (const auto& v : out_predictions.HostVector()) {
ASSERT_EQ(v, 1.5);
}
// Test predict leaf
std::vector<float> leaf_out_predictions;
gpu_predictor->PredictLeaf(dmat.get(), &leaf_out_predictions, model);
EXPECT_EQ(leaf_out_predictions.size(), dmat->Info().num_row_);
for (const auto& v : leaf_out_predictions) {
ASSERT_EQ(v, 0);
}
// Test predict contribution
std::vector<float> out_contribution;
gpu_predictor->PredictContribution(dmat.get(), &out_contribution, model);
EXPECT_EQ(out_contribution.size(), dmat->Info().num_row_);
for (const auto& v : out_contribution) {
ASSERT_EQ(v, 1.5);
}
// Test predict contribution (approximate method)
std::vector<float> out_contribution_approximate;
gpu_predictor->PredictContribution(dmat.get(), &out_contribution_approximate, model, true);
EXPECT_EQ(out_contribution_approximate.size(), dmat->Info().num_row_);
for (const auto& v : out_contribution_approximate) {
ASSERT_EQ(v, 1.5);
}
}
#if defined(XGBOOST_USE_NCCL)
// Test whether pickling preserves predictor parameters
TEST(gpu_predictor, MGPU_PicklingTest) {
int ngpu;
dh::safe_cuda(cudaGetDeviceCount(&ngpu));
dmlc::TemporaryDirectory tempdir;
const std::string tmp_file = tempdir.path + "/simple.libsvm";
CreateBigTestData(tmp_file, 600);
DMatrixHandle dmat[1];
BoosterHandle bst, bst2;
std::vector<bst_float> label;
for (int i = 0; i < 200; ++i) {
label.push_back((i % 2 ? 1 : 0));
}
// Load data matrix
CheckCAPICall(XGDMatrixCreateFromFile(tmp_file.c_str(), 0, &dmat[0]));
CheckCAPICall(XGDMatrixSetFloatInfo(dmat[0], "label", label.data(), 200));
// Create booster
CheckCAPICall(XGBoosterCreate(dmat, 1, &bst));
// Set parameters
CheckCAPICall(XGBoosterSetParam(bst, "seed", "0"));
CheckCAPICall(XGBoosterSetParam(bst, "base_score", "0.5"));
CheckCAPICall(XGBoosterSetParam(bst, "booster", "gbtree"));
CheckCAPICall(XGBoosterSetParam(bst, "learning_rate", "0.01"));
CheckCAPICall(XGBoosterSetParam(bst, "max_depth", "8"));
CheckCAPICall(XGBoosterSetParam(bst, "objective", "binary:logistic"));
CheckCAPICall(XGBoosterSetParam(bst, "seed", "123"));
CheckCAPICall(XGBoosterSetParam(bst, "tree_method", "gpu_hist"));
CheckCAPICall(XGBoosterSetParam(bst, "n_gpus", std::to_string(ngpu).c_str()));
CheckCAPICall(XGBoosterSetParam(bst, "predictor", "gpu_predictor"));
// Run boosting iterations
for (int i = 0; i < 10; ++i) {
CheckCAPICall(XGBoosterUpdateOneIter(bst, i, dmat[0]));
}
// Delete matrix
CheckCAPICall(XGDMatrixFree(dmat[0]));
// Pickle
const char* dptr;
bst_ulong len;
std::string buf;
CheckCAPICall(XGBoosterGetModelRaw(bst, &len, &dptr));
buf = std::string(dptr, len);
CheckCAPICall(XGBoosterFree(bst));
// Unpickle
CheckCAPICall(XGBoosterCreate(nullptr, 0, &bst2));
CheckCAPICall(XGBoosterLoadModelFromBuffer(bst2, buf.c_str(), len));
{ // Query predictor
const auto& kwargs = QueryBoosterConfigurationArguments(bst2);
ASSERT_EQ(kwargs.at("predictor"), "gpu_predictor");
ASSERT_EQ(kwargs.at("n_gpus"), std::to_string(ngpu).c_str());
}
{ // Change n_gpus and query again
CheckCAPICall(XGBoosterSetParam(bst2, "n_gpus", "1"));
const auto& kwargs = QueryBoosterConfigurationArguments(bst2);
ASSERT_EQ(kwargs.at("n_gpus"), "1");
}
{ // Change predictor and query again
CheckCAPICall(XGBoosterSetParam(bst2, "predictor", "cpu_predictor"));
const auto& kwargs = QueryBoosterConfigurationArguments(bst2);
ASSERT_EQ(kwargs.at("predictor"), "cpu_predictor");
}
CheckCAPICall(XGBoosterFree(bst2));
}
#endif // defined(XGBOOST_USE_NCCL)
#if defined(XGBOOST_USE_NCCL)
// multi-GPU predictor test
TEST(gpu_predictor, MGPU_Test) {
std::unique_ptr<Predictor> gpu_predictor =
std::unique_ptr<Predictor>(Predictor::Create("gpu_predictor"));
std::unique_ptr<Predictor> cpu_predictor =
std::unique_ptr<Predictor>(Predictor::Create("cpu_predictor"));
gpu_predictor->Init({std::pair<std::string, std::string>("n_gpus", "-1")}, {});
cpu_predictor->Init({}, {});
for (size_t i = 1; i < 33; i *= 2) {
int n_row = i, n_col = i;
auto dmat = CreateDMatrix(n_row, n_col, 0);
gbm::GBTreeModel model = CreateTestModel();
// Test predict batch
HostDeviceVector<float> gpu_out_predictions;
HostDeviceVector<float> cpu_out_predictions;
gpu_predictor->PredictBatch((*dmat).get(), &gpu_out_predictions, model, 0);
cpu_predictor->PredictBatch((*dmat).get(), &cpu_out_predictions, model, 0);
std::vector<float>& gpu_out_predictions_h = gpu_out_predictions.HostVector();
std::vector<float>& cpu_out_predictions_h = cpu_out_predictions.HostVector();
float abs_tolerance = 0.001;
for (int j = 0; j < gpu_out_predictions.Size(); j++) {
ASSERT_NEAR(gpu_out_predictions_h[j], cpu_out_predictions_h[j], abs_tolerance);
}
delete dmat;
}
}
#endif // defined(XGBOOST_USE_NCCL)
} // namespace predictor
} // namespace xgboost
|
3774ab9b372b29decda731798d9cd9132d2f6911.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define checkCudaErrors(x) printf("%s (%d)\n", hipGetErrorString(x), __LINE__)
__global__ void kernel(double *a, double *b, double *c, int N)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
// Threads past the end of the arrays must not touch memory: the launch
// configuration in main() uses 1024 threads for N = 1000 elements.
if (i < N) {
c[i] = a[i] + b[i];
}
// Possible correction: let one thread do more than one calculation.
// If there is less threads than elements to calculate, then some threads will
// do 2 calculations (instead of one).
if (i < N-640) {
c[i+640] = a[i+640] + b[i+640];
}
}
int main(int argc, char **argv)
{
int N = 1000;
int sz_in_bytes = N*sizeof(double);
double *h_a, *h_b, *h_c;
double *d_a, *d_b, *d_c;
h_a = (double*)malloc(sz_in_bytes);
h_b = (double*)malloc(sz_in_bytes);
h_c = (double*)malloc(sz_in_bytes);
// Initiate values on h_a and h_b
for(int i = 0 ; i < N ; i++) {
h_a[i] = 1./(1.+i);
h_b[i] = (i-1.)/(i+1.);
}
checkCudaErrors(hipMalloc((void**)&d_a, sz_in_bytes));
// Correction:
// checkCudaErrors(hipMalloc((void**)&d_b, 0));
checkCudaErrors(hipMalloc((void**)&d_b, sz_in_bytes));
checkCudaErrors(hipMalloc((void**)&d_c, sz_in_bytes));
checkCudaErrors(hipMemcpy(d_a, h_a, sz_in_bytes, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_b, h_b, sz_in_bytes, hipMemcpyHostToDevice));
// 640 threads au total.
// But we calculate 1000 values.
// => error.
// One correction is: use enough threads.
// dim3 dimBlock(64, 1, 1);
// dim3 dimGrid(10, 1, 1) => dim3 dimGrid(16, 1, 1);
// Another correction is:
// Let a thread make more than one calculation (see function kernel()).
dim3 dimBlock(64, 1, 1);
dim3 dimGrid(16, 1, 1);
hipLaunchKernelGGL(( kernel), dim3(dimGrid) , dim3(dimBlock), 0, 0, d_a, d_b, d_c, N);
checkCudaErrors(hipMemcpy(h_c, d_c, sz_in_bytes, hipMemcpyDeviceToHost));
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
// Verifying
double err = 0, norm = 0;
for(int i = 0 ; i < N ; i++) {
double err_loc = fabs(h_c[i] - (h_a[i]+h_b[i]));
err += err_loc;
norm += fabs(h_c[i]);
}
if (err/norm < 1.e-16) {
printf("SUCCESS (Relative error : %.3e)\n", err/norm);
} else {
printf("ERROR (Relative error : %.3e)\n", err/norm);
}
free(h_a);
free(h_b);
free(h_c);
return 0;
}
| 3774ab9b372b29decda731798d9cd9132d2f6911.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define checkCudaErrors(x) printf("%s (%d)\n", cudaGetErrorString(x), __LINE__)
__global__ void kernel(double *a, double *b, double *c, int N)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
// Threads past the end of the arrays must not touch memory: the launch
// configuration in main() uses 1024 threads for N = 1000 elements.
if (i < N) {
c[i] = a[i] + b[i];
}
// Possible correction: let one thread do more than one calculation.
// If there is less threads than elements to calculate, then some threads will
// do 2 calculations (instead of one).
if (i < N-640) {
c[i+640] = a[i+640] + b[i+640];
}
}
int main(int argc, char **argv)
{
int N = 1000;
int sz_in_bytes = N*sizeof(double);
double *h_a, *h_b, *h_c;
double *d_a, *d_b, *d_c;
h_a = (double*)malloc(sz_in_bytes);
h_b = (double*)malloc(sz_in_bytes);
h_c = (double*)malloc(sz_in_bytes);
// Initiate values on h_a and h_b
for(int i = 0 ; i < N ; i++) {
h_a[i] = 1./(1.+i);
h_b[i] = (i-1.)/(i+1.);
}
checkCudaErrors(cudaMalloc((void**)&d_a, sz_in_bytes));
// Correction:
// checkCudaErrors(cudaMalloc((void**)&d_b, 0));
checkCudaErrors(cudaMalloc((void**)&d_b, sz_in_bytes));
checkCudaErrors(cudaMalloc((void**)&d_c, sz_in_bytes));
checkCudaErrors(cudaMemcpy(d_a, h_a, sz_in_bytes, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_b, h_b, sz_in_bytes, cudaMemcpyHostToDevice));
// 640 threads au total.
// But we calculate 1000 values.
// => error.
// One correction is: use enough threads.
// dim3 dimBlock(64, 1, 1);
// dim3 dimGrid(10, 1, 1) => dim3 dimGrid(16, 1, 1);
// Another correction is:
// Let a thread make more than one calculation (see function kernel()).
dim3 dimBlock(64, 1, 1);
dim3 dimGrid(16, 1, 1);
kernel<<<dimGrid , dimBlock>>>(d_a, d_b, d_c, N);
checkCudaErrors(cudaMemcpy(h_c, d_c, sz_in_bytes, cudaMemcpyDeviceToHost));
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
// Verifying
double err = 0, norm = 0;
for(int i = 0 ; i < N ; i++) {
double err_loc = fabs(h_c[i] - (h_a[i]+h_b[i]));
err += err_loc;
norm += fabs(h_c[i]);
}
if (err/norm < 1.e-16) {
printf("SUCCESS (Relative error : %.3e)\n", err/norm);
} else {
printf("ERROR (Relative error : %.3e)\n", err/norm);
}
free(h_a);
free(h_b);
free(h_c);
return 0;
}
|
55c842eea0a653d26aecb86238a624f7b06ab622.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include <opencv2/gpu/device/common.hpp>
#include <opencv2/gpu/device/vec_traits.hpp>
#include <opencv2/gpu/device/vec_math.hpp>
#include <opencv2/gpu/device/limits.hpp>
namespace cv { namespace gpu {
namespace device
{
template <typename D>
__global__ void Bayer2BGR_8u(const PtrStepb src, PtrStepSz<D> dst, const bool blue_last, const bool start_with_green)
{
const int s_x = blockIdx.x * blockDim.x + threadIdx.x;
int s_y = blockIdx.y * blockDim.y + threadIdx.y;
if (s_y >= dst.rows || (s_x << 2) >= dst.cols)
return;
s_y = ::min(::max(s_y, 1), dst.rows - 2);
uchar4 patch[3][3];
patch[0][1] = ((const uchar4*) src.ptr(s_y - 1))[s_x];
patch[0][0] = ((const uchar4*) src.ptr(s_y - 1))[::max(s_x - 1, 0)];
patch[0][2] = ((const uchar4*) src.ptr(s_y - 1))[::min(s_x + 1, ((dst.cols + 3) >> 2) - 1)];
patch[1][1] = ((const uchar4*) src.ptr(s_y))[s_x];
patch[1][0] = ((const uchar4*) src.ptr(s_y))[::max(s_x - 1, 0)];
patch[1][2] = ((const uchar4*) src.ptr(s_y))[::min(s_x + 1, ((dst.cols + 3) >> 2) - 1)];
patch[2][1] = ((const uchar4*) src.ptr(s_y + 1))[s_x];
patch[2][0] = ((const uchar4*) src.ptr(s_y + 1))[::max(s_x - 1, 0)];
patch[2][2] = ((const uchar4*) src.ptr(s_y + 1))[::min(s_x + 1, ((dst.cols + 3) >> 2) - 1)];
D res0 = VecTraits<D>::all(numeric_limits<uchar>::max());
D res1 = VecTraits<D>::all(numeric_limits<uchar>::max());
D res2 = VecTraits<D>::all(numeric_limits<uchar>::max());
D res3 = VecTraits<D>::all(numeric_limits<uchar>::max());
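// Each thread demosaics four horizontally adjacent pixels packed into a uchar4.
// The outer branch selects the interpolation pattern for the current row phase
// of the Bayer mosaic (rows starting on a green sample vs. not); the inner
// blue_last branch only swaps which interpolated colour is written to .x and
// which to .z of the BGR output.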
if ((s_y & 1) ^ start_with_green)
{
const int t0 = (patch[0][1].x + patch[2][1].x + 1) >> 1;
const int t1 = (patch[1][0].w + patch[1][1].y + 1) >> 1;
const int t2 = (patch[0][1].x + patch[0][1].z + patch[2][1].x + patch[2][1].z + 2) >> 2;
const int t3 = (patch[0][1].y + patch[1][1].x + patch[1][1].z + patch[2][1].y + 2) >> 2;
const int t4 = (patch[0][1].z + patch[2][1].z + 1) >> 1;
const int t5 = (patch[1][1].y + patch[1][1].w + 1) >> 1;
const int t6 = (patch[0][1].z + patch[0][2].x + patch[2][1].z + patch[2][2].x + 2) >> 2;
const int t7 = (patch[0][1].w + patch[1][1].z + patch[1][2].x + patch[2][1].w + 2) >> 2;
if ((s_y & 1) ^ blue_last)
{
res0.x = t1;
res0.y = patch[1][1].x;
res0.z = t0;
res1.x = patch[1][1].y;
res1.y = t3;
res1.z = t2;
res2.x = t5;
res2.y = patch[1][1].z;
res2.z = t4;
res3.x = patch[1][1].w;
res3.y = t7;
res3.z = t6;
}
else
{
res0.x = t0;
res0.y = patch[1][1].x;
res0.z = t1;
res1.x = t2;
res1.y = t3;
res1.z = patch[1][1].y;
res2.x = t4;
res2.y = patch[1][1].z;
res2.z = t5;
res3.x = t6;
res3.y = t7;
res3.z = patch[1][1].w;
}
}
else
{
const int t0 = (patch[0][0].w + patch[0][1].y + patch[2][0].w + patch[2][1].y + 2) >> 2;
const int t1 = (patch[0][1].x + patch[1][0].w + patch[1][1].y + patch[2][1].x + 2) >> 2;
const int t2 = (patch[0][1].y + patch[2][1].y + 1) >> 1;
const int t3 = (patch[1][1].x + patch[1][1].z + 1) >> 1;
const int t4 = (patch[0][1].y + patch[0][1].w + patch[2][1].y + patch[2][1].w + 2) >> 2;
const int t5 = (patch[0][1].z + patch[1][1].y + patch[1][1].w + patch[2][1].z + 2) >> 2;
const int t6 = (patch[0][1].w + patch[2][1].w + 1) >> 1;
const int t7 = (patch[1][1].z + patch[1][2].x + 1) >> 1;
if ((s_y & 1) ^ blue_last)
{
res0.x = patch[1][1].x;
res0.y = t1;
res0.z = t0;
res1.x = t3;
res1.y = patch[1][1].y;
res1.z = t2;
res2.x = patch[1][1].z;
res2.y = t5;
res2.z = t4;
res3.x = t7;
res3.y = patch[1][1].w;
res3.z = t6;
}
else
{
res0.x = t0;
res0.y = t1;
res0.z = patch[1][1].x;
res1.x = t2;
res1.y = patch[1][1].y;
res1.z = t3;
res2.x = t4;
res2.y = t5;
res2.z = patch[1][1].z;
res3.x = t6;
res3.y = patch[1][1].w;
res3.z = t7;
}
}
const int d_x = (blockIdx.x * blockDim.x + threadIdx.x) << 2;
const int d_y = blockIdx.y * blockDim.y + threadIdx.y;
dst(d_y, d_x) = res0;
if (d_x + 1 < dst.cols)
dst(d_y, d_x + 1) = res1;
if (d_x + 2 < dst.cols)
dst(d_y, d_x + 2) = res2;
if (d_x + 3 < dst.cols)
dst(d_y, d_x + 3) = res3;
}
template <typename D>
__global__ void Bayer2BGR_16u(const PtrStepb src, PtrStepSz<D> dst, const bool blue_last, const bool start_with_green)
{
const int s_x = blockIdx.x * blockDim.x + threadIdx.x;
int s_y = blockIdx.y * blockDim.y + threadIdx.y;
if (s_y >= dst.rows || (s_x << 1) >= dst.cols)
return;
s_y = ::min(::max(s_y, 1), dst.rows - 2);
ushort2 patch[3][3];
patch[0][1] = ((const ushort2*) src.ptr(s_y - 1))[s_x];
patch[0][0] = ((const ushort2*) src.ptr(s_y - 1))[::max(s_x - 1, 0)];
patch[0][2] = ((const ushort2*) src.ptr(s_y - 1))[::min(s_x + 1, ((dst.cols + 1) >> 1) - 1)];
patch[1][1] = ((const ushort2*) src.ptr(s_y))[s_x];
patch[1][0] = ((const ushort2*) src.ptr(s_y))[::max(s_x - 1, 0)];
patch[1][2] = ((const ushort2*) src.ptr(s_y))[::min(s_x + 1, ((dst.cols + 1) >> 1) - 1)];
patch[2][1] = ((const ushort2*) src.ptr(s_y + 1))[s_x];
patch[2][0] = ((const ushort2*) src.ptr(s_y + 1))[::max(s_x - 1, 0)];
patch[2][2] = ((const ushort2*) src.ptr(s_y + 1))[::min(s_x + 1, ((dst.cols + 1) >> 1) - 1)];
D res0 = VecTraits<D>::all(numeric_limits<ushort>::max());
D res1 = VecTraits<D>::all(numeric_limits<ushort>::max());
if ((s_y & 1) ^ start_with_green)
{
const int t0 = (patch[0][1].x + patch[2][1].x + 1) >> 1;
const int t1 = (patch[1][0].y + patch[1][1].y + 1) >> 1;
const int t2 = (patch[0][1].x + patch[0][2].x + patch[2][1].x + patch[2][2].x + 2) >> 2;
const int t3 = (patch[0][1].y + patch[1][1].x + patch[1][2].x + patch[2][1].y + 2) >> 2;
if ((s_y & 1) ^ blue_last)
{
res0.x = t1;
res0.y = patch[1][1].x;
res0.z = t0;
res1.x = patch[1][1].y;
res1.y = t3;
res1.z = t2;
}
else
{
res0.x = t0;
res0.y = patch[1][1].x;
res0.z = t1;
res1.x = t2;
res1.y = t3;
res1.z = patch[1][1].y;
}
}
else
{
const int t0 = (patch[0][0].y + patch[0][1].y + patch[2][0].y + patch[2][1].y + 2) >> 2;
const int t1 = (patch[0][1].x + patch[1][0].y + patch[1][1].y + patch[2][1].x + 2) >> 2;
const int t2 = (patch[0][1].y + patch[2][1].y + 1) >> 1;
const int t3 = (patch[1][1].x + patch[1][2].x + 1) >> 1;
if ((s_y & 1) ^ blue_last)
{
res0.x = patch[1][1].x;
res0.y = t1;
res0.z = t0;
res1.x = t3;
res1.y = patch[1][1].y;
res1.z = t2;
}
else
{
res0.x = t0;
res0.y = t1;
res0.z = patch[1][1].x;
res1.x = t2;
res1.y = patch[1][1].y;
res1.z = t3;
}
}
const int d_x = (blockIdx.x * blockDim.x + threadIdx.x) << 1;
const int d_y = blockIdx.y * blockDim.y + threadIdx.y;
dst(d_y, d_x) = res0;
if (d_x + 1 < dst.cols)
dst(d_y, d_x + 1) = res1;
}
template <int cn>
void Bayer2BGR_8u_gpu(PtrStepSzb src, PtrStepSzb dst, bool blue_last, bool start_with_green, hipStream_t stream)
{
typedef typename TypeVec<uchar, cn>::vec_type dst_t;
const dim3 block(32, 8);
const dim3 grid(divUp(dst.cols, 4 * block.x), divUp(dst.rows, block.y));
cudaSafeCall( hipFuncSetCacheConfig(Bayer2BGR_8u<dst_t>, hipFuncCachePreferL1) );
hipLaunchKernelGGL(( Bayer2BGR_8u<dst_t>), dim3(grid), dim3(block), 0, stream, src, (PtrStepSz<dst_t>)dst, blue_last, start_with_green);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
template <int cn>
void Bayer2BGR_16u_gpu(PtrStepSzb src, PtrStepSzb dst, bool blue_last, bool start_with_green, hipStream_t stream)
{
typedef typename TypeVec<ushort, cn>::vec_type dst_t;
const dim3 block(32, 8);
const dim3 grid(divUp(dst.cols, 2 * block.x), divUp(dst.rows, block.y));
cudaSafeCall( hipFuncSetCacheConfig(Bayer2BGR_16u<dst_t>, hipFuncCachePreferL1) );
hipLaunchKernelGGL(( Bayer2BGR_16u<dst_t>), dim3(grid), dim3(block), 0, stream, src, (PtrStepSz<dst_t>)dst, blue_last, start_with_green);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
template void Bayer2BGR_8u_gpu<3>(PtrStepSzb src, PtrStepSzb dst, bool blue_last, bool start_with_green, hipStream_t stream);
template void Bayer2BGR_8u_gpu<4>(PtrStepSzb src, PtrStepSzb dst, bool blue_last, bool start_with_green, hipStream_t stream);
template void Bayer2BGR_16u_gpu<3>(PtrStepSzb src, PtrStepSzb dst, bool blue_last, bool start_with_green, hipStream_t stream);
template void Bayer2BGR_16u_gpu<4>(PtrStepSzb src, PtrStepSzb dst, bool blue_last, bool start_with_green, hipStream_t stream);
}
}}
#endif /* CUDA_DISABLER */ | 55c842eea0a653d26aecb86238a624f7b06ab622.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include <opencv2/gpu/device/common.hpp>
#include <opencv2/gpu/device/vec_traits.hpp>
#include <opencv2/gpu/device/vec_math.hpp>
#include <opencv2/gpu/device/limits.hpp>
namespace cv { namespace gpu {
namespace device
{
template <typename D>
__global__ void Bayer2BGR_8u(const PtrStepb src, PtrStepSz<D> dst, const bool blue_last, const bool start_with_green)
{
const int s_x = blockIdx.x * blockDim.x + threadIdx.x;
int s_y = blockIdx.y * blockDim.y + threadIdx.y;
if (s_y >= dst.rows || (s_x << 2) >= dst.cols)
return;
s_y = ::min(::max(s_y, 1), dst.rows - 2);
uchar4 patch[3][3];
patch[0][1] = ((const uchar4*) src.ptr(s_y - 1))[s_x];
patch[0][0] = ((const uchar4*) src.ptr(s_y - 1))[::max(s_x - 1, 0)];
patch[0][2] = ((const uchar4*) src.ptr(s_y - 1))[::min(s_x + 1, ((dst.cols + 3) >> 2) - 1)];
patch[1][1] = ((const uchar4*) src.ptr(s_y))[s_x];
patch[1][0] = ((const uchar4*) src.ptr(s_y))[::max(s_x - 1, 0)];
patch[1][2] = ((const uchar4*) src.ptr(s_y))[::min(s_x + 1, ((dst.cols + 3) >> 2) - 1)];
patch[2][1] = ((const uchar4*) src.ptr(s_y + 1))[s_x];
patch[2][0] = ((const uchar4*) src.ptr(s_y + 1))[::max(s_x - 1, 0)];
patch[2][2] = ((const uchar4*) src.ptr(s_y + 1))[::min(s_x + 1, ((dst.cols + 3) >> 2) - 1)];
D res0 = VecTraits<D>::all(numeric_limits<uchar>::max());
D res1 = VecTraits<D>::all(numeric_limits<uchar>::max());
D res2 = VecTraits<D>::all(numeric_limits<uchar>::max());
D res3 = VecTraits<D>::all(numeric_limits<uchar>::max());
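// Each thread demosaics four horizontally adjacent pixels packed into a uchar4.
// The outer branch selects the interpolation pattern for the current row phase
// of the Bayer mosaic (rows starting on a green sample vs. not); the inner
// blue_last branch only swaps which interpolated colour is written to .x and
// which to .z of the BGR output.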
if ((s_y & 1) ^ start_with_green)
{
const int t0 = (patch[0][1].x + patch[2][1].x + 1) >> 1;
const int t1 = (patch[1][0].w + patch[1][1].y + 1) >> 1;
const int t2 = (patch[0][1].x + patch[0][1].z + patch[2][1].x + patch[2][1].z + 2) >> 2;
const int t3 = (patch[0][1].y + patch[1][1].x + patch[1][1].z + patch[2][1].y + 2) >> 2;
const int t4 = (patch[0][1].z + patch[2][1].z + 1) >> 1;
const int t5 = (patch[1][1].y + patch[1][1].w + 1) >> 1;
const int t6 = (patch[0][1].z + patch[0][2].x + patch[2][1].z + patch[2][2].x + 2) >> 2;
const int t7 = (patch[0][1].w + patch[1][1].z + patch[1][2].x + patch[2][1].w + 2) >> 2;
if ((s_y & 1) ^ blue_last)
{
res0.x = t1;
res0.y = patch[1][1].x;
res0.z = t0;
res1.x = patch[1][1].y;
res1.y = t3;
res1.z = t2;
res2.x = t5;
res2.y = patch[1][1].z;
res2.z = t4;
res3.x = patch[1][1].w;
res3.y = t7;
res3.z = t6;
}
else
{
res0.x = t0;
res0.y = patch[1][1].x;
res0.z = t1;
res1.x = t2;
res1.y = t3;
res1.z = patch[1][1].y;
res2.x = t4;
res2.y = patch[1][1].z;
res2.z = t5;
res3.x = t6;
res3.y = t7;
res3.z = patch[1][1].w;
}
}
else
{
const int t0 = (patch[0][0].w + patch[0][1].y + patch[2][0].w + patch[2][1].y + 2) >> 2;
const int t1 = (patch[0][1].x + patch[1][0].w + patch[1][1].y + patch[2][1].x + 2) >> 2;
const int t2 = (patch[0][1].y + patch[2][1].y + 1) >> 1;
const int t3 = (patch[1][1].x + patch[1][1].z + 1) >> 1;
const int t4 = (patch[0][1].y + patch[0][1].w + patch[2][1].y + patch[2][1].w + 2) >> 2;
const int t5 = (patch[0][1].z + patch[1][1].y + patch[1][1].w + patch[2][1].z + 2) >> 2;
const int t6 = (patch[0][1].w + patch[2][1].w + 1) >> 1;
const int t7 = (patch[1][1].z + patch[1][2].x + 1) >> 1;
if ((s_y & 1) ^ blue_last)
{
res0.x = patch[1][1].x;
res0.y = t1;
res0.z = t0;
res1.x = t3;
res1.y = patch[1][1].y;
res1.z = t2;
res2.x = patch[1][1].z;
res2.y = t5;
res2.z = t4;
res3.x = t7;
res3.y = patch[1][1].w;
res3.z = t6;
}
else
{
res0.x = t0;
res0.y = t1;
res0.z = patch[1][1].x;
res1.x = t2;
res1.y = patch[1][1].y;
res1.z = t3;
res2.x = t4;
res2.y = t5;
res2.z = patch[1][1].z;
res3.x = t6;
res3.y = patch[1][1].w;
res3.z = t7;
}
}
const int d_x = (blockIdx.x * blockDim.x + threadIdx.x) << 2;
const int d_y = blockIdx.y * blockDim.y + threadIdx.y;
dst(d_y, d_x) = res0;
if (d_x + 1 < dst.cols)
dst(d_y, d_x + 1) = res1;
if (d_x + 2 < dst.cols)
dst(d_y, d_x + 2) = res2;
if (d_x + 3 < dst.cols)
dst(d_y, d_x + 3) = res3;
}
template <typename D>
__global__ void Bayer2BGR_16u(const PtrStepb src, PtrStepSz<D> dst, const bool blue_last, const bool start_with_green)
{
const int s_x = blockIdx.x * blockDim.x + threadIdx.x;
int s_y = blockIdx.y * blockDim.y + threadIdx.y;
if (s_y >= dst.rows || (s_x << 1) >= dst.cols)
return;
s_y = ::min(::max(s_y, 1), dst.rows - 2);
ushort2 patch[3][3];
patch[0][1] = ((const ushort2*) src.ptr(s_y - 1))[s_x];
patch[0][0] = ((const ushort2*) src.ptr(s_y - 1))[::max(s_x - 1, 0)];
patch[0][2] = ((const ushort2*) src.ptr(s_y - 1))[::min(s_x + 1, ((dst.cols + 1) >> 1) - 1)];
patch[1][1] = ((const ushort2*) src.ptr(s_y))[s_x];
patch[1][0] = ((const ushort2*) src.ptr(s_y))[::max(s_x - 1, 0)];
patch[1][2] = ((const ushort2*) src.ptr(s_y))[::min(s_x + 1, ((dst.cols + 1) >> 1) - 1)];
patch[2][1] = ((const ushort2*) src.ptr(s_y + 1))[s_x];
patch[2][0] = ((const ushort2*) src.ptr(s_y + 1))[::max(s_x - 1, 0)];
patch[2][2] = ((const ushort2*) src.ptr(s_y + 1))[::min(s_x + 1, ((dst.cols + 1) >> 1) - 1)];
D res0 = VecTraits<D>::all(numeric_limits<ushort>::max());
D res1 = VecTraits<D>::all(numeric_limits<ushort>::max());
if ((s_y & 1) ^ start_with_green)
{
const int t0 = (patch[0][1].x + patch[2][1].x + 1) >> 1;
const int t1 = (patch[1][0].y + patch[1][1].y + 1) >> 1;
const int t2 = (patch[0][1].x + patch[0][2].x + patch[2][1].x + patch[2][2].x + 2) >> 2;
const int t3 = (patch[0][1].y + patch[1][1].x + patch[1][2].x + patch[2][1].y + 2) >> 2;
if ((s_y & 1) ^ blue_last)
{
res0.x = t1;
res0.y = patch[1][1].x;
res0.z = t0;
res1.x = patch[1][1].y;
res1.y = t3;
res1.z = t2;
}
else
{
res0.x = t0;
res0.y = patch[1][1].x;
res0.z = t1;
res1.x = t2;
res1.y = t3;
res1.z = patch[1][1].y;
}
}
else
{
const int t0 = (patch[0][0].y + patch[0][1].y + patch[2][0].y + patch[2][1].y + 2) >> 2;
const int t1 = (patch[0][1].x + patch[1][0].y + patch[1][1].y + patch[2][1].x + 2) >> 2;
const int t2 = (patch[0][1].y + patch[2][1].y + 1) >> 1;
const int t3 = (patch[1][1].x + patch[1][2].x + 1) >> 1;
if ((s_y & 1) ^ blue_last)
{
res0.x = patch[1][1].x;
res0.y = t1;
res0.z = t0;
res1.x = t3;
res1.y = patch[1][1].y;
res1.z = t2;
}
else
{
res0.x = t0;
res0.y = t1;
res0.z = patch[1][1].x;
res1.x = t2;
res1.y = patch[1][1].y;
res1.z = t3;
}
}
const int d_x = (blockIdx.x * blockDim.x + threadIdx.x) << 1;
const int d_y = blockIdx.y * blockDim.y + threadIdx.y;
dst(d_y, d_x) = res0;
if (d_x + 1 < dst.cols)
dst(d_y, d_x + 1) = res1;
}
template <int cn>
void Bayer2BGR_8u_gpu(PtrStepSzb src, PtrStepSzb dst, bool blue_last, bool start_with_green, cudaStream_t stream)
{
typedef typename TypeVec<uchar, cn>::vec_type dst_t;
const dim3 block(32, 8);
const dim3 grid(divUp(dst.cols, 4 * block.x), divUp(dst.rows, block.y));
cudaSafeCall( cudaFuncSetCacheConfig(Bayer2BGR_8u<dst_t>, cudaFuncCachePreferL1) );
Bayer2BGR_8u<dst_t><<<grid, block, 0, stream>>>(src, (PtrStepSz<dst_t>)dst, blue_last, start_with_green);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
template <int cn>
void Bayer2BGR_16u_gpu(PtrStepSzb src, PtrStepSzb dst, bool blue_last, bool start_with_green, cudaStream_t stream)
{
typedef typename TypeVec<ushort, cn>::vec_type dst_t;
const dim3 block(32, 8);
const dim3 grid(divUp(dst.cols, 2 * block.x), divUp(dst.rows, block.y));
cudaSafeCall( cudaFuncSetCacheConfig(Bayer2BGR_16u<dst_t>, cudaFuncCachePreferL1) );
Bayer2BGR_16u<dst_t><<<grid, block, 0, stream>>>(src, (PtrStepSz<dst_t>)dst, blue_last, start_with_green);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
template void Bayer2BGR_8u_gpu<3>(PtrStepSzb src, PtrStepSzb dst, bool blue_last, bool start_with_green, cudaStream_t stream);
template void Bayer2BGR_8u_gpu<4>(PtrStepSzb src, PtrStepSzb dst, bool blue_last, bool start_with_green, cudaStream_t stream);
template void Bayer2BGR_16u_gpu<3>(PtrStepSzb src, PtrStepSzb dst, bool blue_last, bool start_with_green, cudaStream_t stream);
template void Bayer2BGR_16u_gpu<4>(PtrStepSzb src, PtrStepSzb dst, bool blue_last, bool start_with_green, cudaStream_t stream);
}
}}
#endif /* CUDA_DISABLER */ |
79578e3ccb98fdc0417ac836dee98d7787f666f1.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2020, the YACCLAB contributors, as
// shown by the AUTHORS file. All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include <opencv2/cudafeatures2d.hpp>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "labeling_algorithms.h"
#include "register.h"
// SAUF in GPU
#define BLOCK_ROWS 16
#define BLOCK_COLS 16
using namespace cv;
namespace {
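// Union-find over the label image: for every foreground pixel, s_buf stores
// parent_index + 1 (0 is reserved for background). Find chases parent links up
// to the root; Union merges two trees lock-free by atomically re-pointing the
// root with the larger index at the root with the smaller index via atomicMin.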
__device__ unsigned Find(const int* s_buf, unsigned n) {
unsigned label = s_buf[n];
assert(label > 0);
while (label - 1 != n) {
n = label - 1;
label = s_buf[n];
assert(label > 0);
}
return n;
}
__device__ void Union(int* s_buf, unsigned a, unsigned b) {
bool done;
do {
a = Find(s_buf, a);
b = Find(s_buf, b);
if (a < b) {
int old = atomicMin(s_buf + b, a + 1);
done = (old == b + 1);
b = old - 1;
}
else if (b < a) {
int old = atomicMin(s_buf + a, b + 1);
done = (old == a + 1);
a = old - 1;
}
else {
done = true;
}
} while (!done);
}
__global__ void Initialization(const cuda::PtrStepSzb img, cuda::PtrStepSzi labels) {
unsigned row = blockIdx.y * BLOCK_ROWS + threadIdx.y;
unsigned col = blockIdx.x * BLOCK_COLS + threadIdx.x;
unsigned img_index = row * (img.step / img.elem_size) + col;
unsigned labels_index = row * (labels.step / labels.elem_size) + col;
if (row < labels.rows && col < labels.cols) {
if (img[img_index] > 0) {
labels[labels_index] = labels_index + 1;
}
else {
labels[labels_index] = 0;
}
}
}
__global__ void Merge(const cuda::PtrStepSzb img, cuda::PtrStepSzi labels) {
unsigned row = blockIdx.y * BLOCK_ROWS + threadIdx.y;
unsigned col = blockIdx.x * BLOCK_COLS + threadIdx.x;
unsigned img_index = row * img.step + col;
unsigned labels_index = row * (labels.step / labels.elem_size) + col;
if (row < labels.rows && col < labels.cols) {
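// The macros below drive the SAUF (Wu et al. 2009) decision tree pulled in by
// the #include that follows: p, q, r are the upper-left, upper and upper-right
// neighbours, s the left neighbour, x the current pixel. The non-trivial
// ACTIONs union the current pixel with the matching already-visited neighbours.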
#define CONDITION_P col > 0 && row > 0 && img[img_index - img.step - 1] > 0
#define CONDITION_Q row > 0 && img[img_index - img.step] > 0
#define CONDITION_R col < img.cols - 1 && row > 0 && img[img_index - img.step + 1] > 0
#define CONDITION_S col > 0 && img[img_index - 1] > 0
#define CONDITION_X img[img_index] > 0
#define ACTION_1 // nothing to do
#define ACTION_2 // LabelsSolver::NewLabel(); // new label
#define ACTION_3 Union(labels.data, labels_index, labels_index - labels.step / labels.elem_size - 1); //img_labels_row_prev[c - 1]; // x <- p
#define ACTION_4 Union(labels.data, labels_index, labels_index - labels.step / labels.elem_size); //img_labels_row_prev[c]; // x <- q
#define ACTION_5 Union(labels.data, labels_index, labels_index - labels.step / labels.elem_size + 1); //img_labels_row_prev[c + 1]; // x <- r
#define ACTION_6 Union(labels.data, labels_index, labels_index - 1); //img_labels_row[c - 1]; // x <- s
#define ACTION_7 Union(labels.data, labels_index, labels_index - labels.step / labels.elem_size - 1); \
Union(labels.data, labels_index, labels_index - labels.step / labels.elem_size + 1); //LabelsSolver::Merge(img_labels_row_prev[c - 1], img_labels_row_prev[c + 1]); // x <- p + r
#define ACTION_8 Union(labels.data, labels_index, labels_index - 1); \
Union(labels.data, labels_index, labels_index - labels.step / labels.elem_size + 1); //LabelsSolver::Merge(img_labels_row[c - 1], img_labels_row_prev[c + 1]); // x <- s + r
#include "labeling_wu_2009_tree.inc.h"
#undef ACTION_1
#undef ACTION_2
#undef ACTION_3
#undef ACTION_4
#undef ACTION_5
#undef ACTION_6
#undef ACTION_7
#undef ACTION_8
#undef CONDITION_P
#undef CONDITION_Q
#undef CONDITION_R
#undef CONDITION_S
#undef CONDITION_X
}
}
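// Final pass: flatten every foreground pixel directly onto the root of its
// union-find tree (+1 keeps 0 reserved for background).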
__global__ void Compression(cuda::PtrStepSzi labels) {
unsigned row = blockIdx.y * BLOCK_ROWS + threadIdx.y;
unsigned col = blockIdx.x * BLOCK_COLS + threadIdx.x;
unsigned labels_index = row * (labels.step / labels.elem_size) + col;
if (row < labels.rows && col < labels.cols) {
unsigned int val = labels[labels_index];
if (val > 0) {
labels[labels_index] = Find(labels.data, labels_index) + 1;
}
}
}
}
class C_SAUF : public GpuLabeling2D<Connectivity2D::CONN_8> {
private:
dim3 grid_size_;
dim3 block_size_;
public:
C_SAUF() {}
void PerformLabeling() {
d_img_labels_.create(d_img_.size(), CV_32SC1);
grid_size_ = dim3((d_img_.cols + BLOCK_COLS - 1) / BLOCK_COLS, (d_img_.rows + BLOCK_ROWS - 1) / BLOCK_ROWS, 1);
block_size_ = dim3(BLOCK_COLS, BLOCK_ROWS, 1);
Initialization << <grid_size_, block_size_ >> > (d_img_, d_img_labels_);
Merge << <grid_size_, block_size_ >> > (d_img_, d_img_labels_);
//Mat1i local_labels(img_.size());
//d_img_labels_.download(local_labels);
//cuda::GpuMat d_global_labels;
//d_img_labels_.copyTo(d_global_labels);
//PathCompression << <grid_size_, block_size_ >> > (d_img_, d_global_labels);
//// ZeroBackground << <grid_size_, block_size_ >> > (d_img_, d_global_labels);
//Mat1i global_labels(img_.size());
//d_global_labels.download(global_labels);
Compression << <grid_size_, block_size_ >> > (d_img_labels_);
//d_img_labels_.download(img_labels_);
hipDeviceSynchronize();
}
void PerformLabelingBlocksize(int x, int y, int z) {
d_img_labels_.create(d_img_.size(), CV_32SC1);
grid_size_ = dim3((d_img_.cols + x - 1) / x, (d_img_.rows + y - 1) / y, 1);
block_size_ = dim3(x, y, 1);
BLOCKSIZE_KERNEL(Initialization, grid_size_, block_size_, 0, d_img_, d_img_labels_)
BLOCKSIZE_KERNEL(Merge, grid_size_, block_size_, 0, d_img_, d_img_labels_)
BLOCKSIZE_KERNEL(Compression, grid_size_, block_size_, 0, d_img_labels_)
}
private:
double Alloc() {
perf_.start();
d_img_labels_.create(d_img_.size(), CV_32SC1);
hipMemset2D(d_img_labels_.data, d_img_labels_.step, 0, d_img_labels_.cols * 4, d_img_labels_.rows);
hipDeviceSynchronize();
double t = perf_.stop();
perf_.start();
hipMemset2D(d_img_labels_.data, d_img_labels_.step, 0, d_img_labels_.cols * 4, d_img_labels_.rows);
hipDeviceSynchronize();
t -= perf_.stop();
return t;
}
double Dealloc() {
perf_.start();
perf_.stop();
return perf_.last();
}
double MemoryTransferHostToDevice() {
perf_.start();
d_img_.upload(img_);
perf_.stop();
return perf_.last();
}
void MemoryTransferDeviceToHost() {
d_img_labels_.download(img_labels_);
}
void AllScans() {
grid_size_ = dim3((d_img_.cols + BLOCK_COLS - 1) / BLOCK_COLS, (d_img_.rows + BLOCK_ROWS - 1) / BLOCK_ROWS, 1);
block_size_ = dim3(BLOCK_COLS, BLOCK_ROWS, 1);
Initialization << <grid_size_, block_size_ >> > (d_img_, d_img_labels_);
Merge << <grid_size_, block_size_ >> > (d_img_, d_img_labels_);
Compression << <grid_size_, block_size_ >> > (d_img_labels_);
hipDeviceSynchronize();
}
public:
void PerformLabelingWithSteps()
{
double alloc_timing = Alloc();
perf_.start();
AllScans();
perf_.stop();
perf_.store(Step(StepType::ALL_SCANS), perf_.last());
double dealloc_timing = Dealloc();
perf_.store(Step(StepType::ALLOC_DEALLOC), alloc_timing + dealloc_timing);
}
};
REGISTER_LABELING(C_SAUF);
REGISTER_KERNELS(C_SAUF, Initialization, Merge, Compression) | 79578e3ccb98fdc0417ac836dee98d7787f666f1.cu | // Copyright (c) 2020, the YACCLAB contributors, as
// shown by the AUTHORS file. All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include <opencv2/cudafeatures2d.hpp>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "labeling_algorithms.h"
#include "register.h"
// SAUF in GPU
#define BLOCK_ROWS 16
#define BLOCK_COLS 16
using namespace cv;
namespace {
__device__ unsigned Find(const int* s_buf, unsigned n) {
unsigned label = s_buf[n];
assert(label > 0);
while (label - 1 != n) {
n = label - 1;
label = s_buf[n];
assert(label > 0);
}
return n;
}
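// Union by atomicMin: find both roots, then link the root with the larger index under the
// smaller one (labels are stored 1-based); retry until the link sticks or the roots coincide.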
__device__ void Union(int* s_buf, unsigned a, unsigned b) {
bool done;
do {
a = Find(s_buf, a);
b = Find(s_buf, b);
if (a < b) {
int old = atomicMin(s_buf + b, a + 1);
done = (old == b + 1);
b = old - 1;
}
else if (b < a) {
int old = atomicMin(s_buf + a, b + 1);
done = (old == a + 1);
a = old - 1;
}
else {
done = true;
}
} while (!done);
}
__global__ void Initialization(const cuda::PtrStepSzb img, cuda::PtrStepSzi labels) {
unsigned row = blockIdx.y * BLOCK_ROWS + threadIdx.y;
unsigned col = blockIdx.x * BLOCK_COLS + threadIdx.x;
unsigned img_index = row * (img.step / img.elem_size) + col;
unsigned labels_index = row * (labels.step / labels.elem_size) + col;
if (row < labels.rows && col < labels.cols) {
if (img[img_index] > 0) {
labels[labels_index] = labels_index + 1;
}
else {
labels[labels_index] = 0;
}
}
}
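// Merge: runs the SAUF/Wu-2009 decision tree (included below) on every foreground pixel,
// merging its provisional label with those of the already-visited neighbours p, q, r, s.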
__global__ void Merge(const cuda::PtrStepSzb img, cuda::PtrStepSzi labels) {
unsigned row = blockIdx.y * BLOCK_ROWS + threadIdx.y;
unsigned col = blockIdx.x * BLOCK_COLS + threadIdx.x;
unsigned img_index = row * img.step + col;
unsigned labels_index = row * (labels.step / labels.elem_size) + col;
if (row < labels.rows && col < labels.cols) {
#define CONDITION_P col > 0 && row > 0 && img[img_index - img.step - 1] > 0
#define CONDITION_Q row > 0 && img[img_index - img.step] > 0
#define CONDITION_R col < img.cols - 1 && row > 0 && img[img_index - img.step + 1] > 0
#define CONDITION_S col > 0 && img[img_index - 1] > 0
#define CONDITION_X img[img_index] > 0
#define ACTION_1 // nothing to do
#define ACTION_2 // LabelsSolver::NewLabel(); // new label
#define ACTION_3 Union(labels.data, labels_index, labels_index - labels.step / labels.elem_size - 1); //img_labels_row_prev[c - 1]; // x <- p
#define ACTION_4 Union(labels.data, labels_index, labels_index - labels.step / labels.elem_size); //img_labels_row_prev[c]; // x <- q
#define ACTION_5 Union(labels.data, labels_index, labels_index - labels.step / labels.elem_size + 1); //img_labels_row_prev[c + 1]; // x <- r
#define ACTION_6 Union(labels.data, labels_index, labels_index - 1); //img_labels_row[c - 1]; // x <- s
#define ACTION_7 Union(labels.data, labels_index, labels_index - labels.step / labels.elem_size - 1); \
Union(labels.data, labels_index, labels_index - labels.step / labels.elem_size + 1); //LabelsSolver::Merge(img_labels_row_prev[c - 1], img_labels_row_prev[c + 1]); // x <- p + r
#define ACTION_8 Union(labels.data, labels_index, labels_index - 1); \
Union(labels.data, labels_index, labels_index - labels.step / labels.elem_size + 1); //LabelsSolver::Merge(img_labels_row[c - 1], img_labels_row_prev[c + 1]); // x <- s + r
#include "labeling_wu_2009_tree.inc.h"
#undef ACTION_1
#undef ACTION_2
#undef ACTION_3
#undef ACTION_4
#undef ACTION_5
#undef ACTION_6
#undef ACTION_7
#undef ACTION_8
#undef CONDITION_P
#undef CONDITION_Q
#undef CONDITION_R
#undef CONDITION_S
#undef CONDITION_X
}
}
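// Compression: path-compression pass that replaces every foreground label with its root label.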
__global__ void Compression(cuda::PtrStepSzi labels) {
unsigned row = blockIdx.y * BLOCK_ROWS + threadIdx.y;
unsigned col = blockIdx.x * BLOCK_COLS + threadIdx.x;
unsigned labels_index = row * (labels.step / labels.elem_size) + col;
if (row < labels.rows && col < labels.cols) {
unsigned int val = labels[labels_index];
if (val > 0) {
labels[labels_index] = Find(labels.data, labels_index) + 1;
}
}
}
}
class C_SAUF : public GpuLabeling2D<Connectivity2D::CONN_8> {
private:
dim3 grid_size_;
dim3 block_size_;
public:
C_SAUF() {}
void PerformLabeling() {
d_img_labels_.create(d_img_.size(), CV_32SC1);
grid_size_ = dim3((d_img_.cols + BLOCK_COLS - 1) / BLOCK_COLS, (d_img_.rows + BLOCK_ROWS - 1) / BLOCK_ROWS, 1);
block_size_ = dim3(BLOCK_COLS, BLOCK_ROWS, 1);
Initialization << <grid_size_, block_size_ >> > (d_img_, d_img_labels_);
Merge << <grid_size_, block_size_ >> > (d_img_, d_img_labels_);
//Mat1i local_labels(img_.size());
//d_img_labels_.download(local_labels);
//cuda::GpuMat d_global_labels;
//d_img_labels_.copyTo(d_global_labels);
//PathCompression << <grid_size_, block_size_ >> > (d_img_, d_global_labels);
//// ZeroBackground << <grid_size_, block_size_ >> > (d_img_, d_global_labels);
//Mat1i global_labels(img_.size());
//d_global_labels.download(global_labels);
Compression << <grid_size_, block_size_ >> > (d_img_labels_);
//d_img_labels_.download(img_labels_);
cudaDeviceSynchronize();
}
void PerformLabelingBlocksize(int x, int y, int z) {
d_img_labels_.create(d_img_.size(), CV_32SC1);
grid_size_ = dim3((d_img_.cols + x - 1) / x, (d_img_.rows + y - 1) / y, 1);
block_size_ = dim3(x, y, 1);
BLOCKSIZE_KERNEL(Initialization, grid_size_, block_size_, 0, d_img_, d_img_labels_)
BLOCKSIZE_KERNEL(Merge, grid_size_, block_size_, 0, d_img_, d_img_labels_)
BLOCKSIZE_KERNEL(Compression, grid_size_, block_size_, 0, d_img_labels_)
}
private:
double Alloc() {
perf_.start();
d_img_labels_.create(d_img_.size(), CV_32SC1);
cudaMemset2D(d_img_labels_.data, d_img_labels_.step, 0, d_img_labels_.cols * 4, d_img_labels_.rows);
cudaDeviceSynchronize();
double t = perf_.stop();
perf_.start();
cudaMemset2D(d_img_labels_.data, d_img_labels_.step, 0, d_img_labels_.cols * 4, d_img_labels_.rows);
cudaDeviceSynchronize();
t -= perf_.stop();
return t;
}
double Dealloc() {
perf_.start();
perf_.stop();
return perf_.last();
}
double MemoryTransferHostToDevice() {
perf_.start();
d_img_.upload(img_);
perf_.stop();
return perf_.last();
}
void MemoryTransferDeviceToHost() {
d_img_labels_.download(img_labels_);
}
void AllScans() {
grid_size_ = dim3((d_img_.cols + BLOCK_COLS - 1) / BLOCK_COLS, (d_img_.rows + BLOCK_ROWS - 1) / BLOCK_ROWS, 1);
block_size_ = dim3(BLOCK_COLS, BLOCK_ROWS, 1);
Initialization << <grid_size_, block_size_ >> > (d_img_, d_img_labels_);
Merge << <grid_size_, block_size_ >> > (d_img_, d_img_labels_);
Compression << <grid_size_, block_size_ >> > (d_img_labels_);
cudaDeviceSynchronize();
}
public:
void PerformLabelingWithSteps()
{
double alloc_timing = Alloc();
perf_.start();
AllScans();
perf_.stop();
perf_.store(Step(StepType::ALL_SCANS), perf_.last());
double dealloc_timing = Dealloc();
perf_.store(Step(StepType::ALLOC_DEALLOC), alloc_timing + dealloc_timing);
}
};
REGISTER_LABELING(C_SAUF);
REGISTER_KERNELS(C_SAUF, Initialization, Merge, Compression) |
6528dbb87f4b7d0f5cb5f459d656f2523ba0e1cb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <mpi.h>
#include <math.h>
#define DEFAULT_GRIDSIZE 1024
// Save/ Print only every nth step:
#define PRINTSTEP 10
// TIMESTEPS is used below but never defined in the original; 100 is an assumed placeholder value
#define TIMESTEPS 100
void init_cells(double* grid, int gridsize);
void print(double* grid, int padded_grid_size, int time);
void save(FILE *f, double* grid, int padded_grid_size, int time);
int find_option( int argc, char **argv, const char *option );
int read_int( int argc, char **argv, const char *option, int default_value );
char *read_string( int argc, char **argv, const char *option, char *default_value );
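// Each of the four MPI ranks initialises its n_block x n_block quadrant of the global
// n x n matrix; the rank number selects the row/column offset of that quadrant.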
__global__ void init_matrix(float *A, int rank, int n_block = 512, int n = 1024) {
int i = threadIdx.y + blockIdx.y * (n_block / gridDim.y);
int end_i = i + (n_block / gridDim.y / blockDim.y);
if(rank == 2 || rank == 3) {
i += n_block;
end_i += n_block;
}
int j = threadIdx.x + blockIdx.x * (n_block / gridDim.x);
int end_j = j + (n_block / gridDim.x / blockDim.x);
if(rank == 0 || rank == 3) {
j += n_block;
end_j += n_block;
}
const int j_begin = j; // remember the starting column so the inner loop restarts on every row
for(; i < end_i; i++) {
for(j = j_begin; j < end_j; j++) {
if(i < n/2 && j < n/2 || n/2 >= i && n/2 >= j)
A[i*n+j] = 1;
else
A[i*n+j] = 0;
}
}
}
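// One Jacobi sweep over this rank's quadrant: each interior element becomes the average of its
// four neighbours; gt_eps records, per thread block, whether any element rose by more than eps
// (fdimf only measures the positive part of the difference).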
__global__ void jacobi_iteration(float *A_in, float *A_out,
bool *gt_eps, int rank, float eps = 0.01,
int n_block = 512, int n = 1024) {
__shared__ bool gt_eps_block; // shared variables cannot take an initialiser at declaration
if (threadIdx.x == 0 && threadIdx.y == 0) gt_eps_block = false;
__syncthreads();
int i = threadIdx.y + blockIdx.y * (n_block / gridDim.y);
int end_i = i + (n_block / gridDim.y / blockDim.y);
if(rank == 2 || rank == 3) {
i += n_block;
end_i += n_block;
}
int j = threadIdx.x + blockIdx.x * (n_block / gridDim.x);
int end_j = j + (n_block / gridDim.x / blockDim.x);
if(rank == 0 || rank == 3) {
j += n_block;
end_j += n_block;
}
const int j_begin = j; // restart the column index for every row
for(; i < end_i; i++) {
for(j = j_begin; j < end_j; j++) {
if (0 < i && i < n - 1 && 0 < j && j < n - 1)
A_out[i*n+j] = (A_in[(i-1)*n+j] + A_in[(i+1)*n+j] \
+ A_in[i*n+j-1] + A_in[i*n+j+1]) / 4.0;
if(fdimf(A_out[i*n+j], A_in[i*n+j]) > eps)
gt_eps_block = true;
}
}
__syncthreads(); // make every thread's update of gt_eps_block visible before it is written out
gt_eps[blockIdx.y * gridDim.x + blockIdx.x] = gt_eps_block;
}
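// Halo exchange between neighbouring ranks; left as an unfinished stub here
// (the MPI_Sendrecv call below carries no arguments).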
void exchange_borders(float *A, int rank, int n = 1024, int processes = 4) {
int before = abs((rank - 1) % 4);
int next = (rank + 1) % 4;
MPI_Sendrecv()
}
int main(int argc, char** argv) {
int numtasks, rank;
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &numtasks);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Status status;
hipError_t err = hipGetLastError();
if( find_option( argc, argv, "-h" ) >= 0 )
{
printf( "Options:\n" );
printf( "-h to see this help\n" );
printf( "-n <int> to set the grid size\n" );
printf( "-o <filename> to specify the output file name\n" );
return 1;
}
int GRIDSIZE = read_int( argc, argv, "-n", DEFAULT_GRIDSIZE );
// Check gridsize for some basic assumptions
if(GRIDSIZE != DEFAULT_GRIDSIZE) {
printf("Only Gridsize of 1024 is allowed!\n");
return 1;
}
char *savename = read_string( argc, argv, "-o", "sample_conduct.txt" );
FILE *f = savename ? fopen( savename, "w" ) : NULL;
if( f == NULL )
{
printf( "failed to open %s\n", savename );
return 1;
}
float *A_block;
float *A_block_tmp;
bool *gt_eps;
float *A;
hipMallocManaged(&A_block, sizeof *A_block * 1024 * 1024);
hipMallocManaged(&A_block_tmp, sizeof *A_block_tmp * 1024 * 1024);
hipLaunchKernelGGL(( init_matrix), dim3(1), dim3(256), 0, 0, A_block, rank);
err = hipGetLastError(); // query the status of the launch above, not the stale value captured earlier
if (err != hipSuccess)
fprintf(stderr, "Error: %s\n", hipGetErrorString(err));
hipDeviceSynchronize();
dim3 nb(4,4);
dim3 nt(1, 128);
hipMallocManaged(&gt_eps, sizeof *gt_eps * 4 * 4);
while(**something**) {
hipLaunchKernelGGL(( jacobi_iteration), dim3(nb), dim3(nt), 0, 0, A_block, A_block_tmp, gt_eps, rank);
}
double *T, *T_block, *Tn_block;
// Allocate Grid with a padding on every side for convenience
int padded_grid_size = GRIDSIZE+2;
int blocksize = GRIDSIZE / numtasks;
if(rank == 0) {
T=(double *) malloc((padded_grid_size)*(padded_grid_size)*sizeof(double));
// temp grid to hold the calculated data for the next time step
init_cells(T,padded_grid_size);
}
T_block = (double *) malloc(padded_grid_size * (blocksize+2) * sizeof(double));
Tn_block = (double *) malloc(padded_grid_size * (blocksize+2) * sizeof(double));
MPI_Scatter(&T[padded_grid_size], padded_grid_size * blocksize, MPI_DOUBLE, &T_block[padded_grid_size],
padded_grid_size * blocksize, MPI_DOUBLE, 0, MPI_COMM_WORLD);
// remember -- our grid has a border around it!
int last_row = padded_grid_size * (blocksize + 1);
int next_to_last_row = last_row - padded_grid_size;
for(int t=0;t<TIMESTEPS;t++) { // Loop for the time steps
if(rank > 0 && rank < numtasks-1) {
MPI_Sendrecv(&T_block[padded_grid_size], padded_grid_size, MPI_DOUBLE, rank-1, 42,
T_block, padded_grid_size, MPI_DOUBLE, rank-1, 42, MPI_COMM_WORLD, NULL);
MPI_Sendrecv(&T_block[next_to_last_row], padded_grid_size, MPI_DOUBLE, rank+1, 42,
&T_block[last_row], padded_grid_size, MPI_DOUBLE, rank+1, 42, MPI_COMM_WORLD, NULL);
} else if(rank == 0) {
MPI_Sendrecv(&T_block[next_to_last_row], padded_grid_size, MPI_DOUBLE, 1, 42, &T_block[last_row], padded_grid_size,
MPI_DOUBLE, 1, 42, MPI_COMM_WORLD, &status);
} else if(rank == numtasks-1) {
MPI_Sendrecv(&T_block[padded_grid_size], padded_grid_size, MPI_DOUBLE, rank-1, 42, T_block, padded_grid_size,
MPI_DOUBLE, rank-1, 42, MPI_COMM_WORLD, &status);
}
// Calculate grid cells for next timestep
for(int i=1; i<blocksize+1; i++) {
for(int j=1; j<padded_grid_size-1; j++) {
Tn_block[i*padded_grid_size + j] = (T_block[(i-1)*padded_grid_size+j] + T_block[i*padded_grid_size + (j-1)] \
+ T_block[i*padded_grid_size+(j+1)] + T_block[(i+1)*padded_grid_size+j]) / 4.0;
}
}
// copy new grid into old one
for(int i=1; i<blocksize+1; i++) {
for(int j=1; j<padded_grid_size-1; j++) {
T_block[i*padded_grid_size+j] = Tn_block[i*padded_grid_size+j];
}
}
if(!(t % PRINTSTEP)) {
MPI_Barrier(MPI_COMM_WORLD);
MPI_Gather(&T_block[padded_grid_size], blocksize*padded_grid_size, MPI_DOUBLE, &T[padded_grid_size],
blocksize*padded_grid_size, MPI_DOUBLE, 0, MPI_COMM_WORLD);
if(rank == 0)
save(f,T,padded_grid_size,TIMESTEPS);
}
}
fclose(f);
MPI_Finalize();
return 0;
}
void init_cells(double* grid, int gridsize) {
int i,j;
// set everything to zero, even the border
for(i=0;i<gridsize;i++) {
for(j=0;j<gridsize;j++) {
grid[i*gridsize + j]=0;
}
}
// but the most inner 4 cells
for(i=gridsize/2-1;i<=gridsize/2;i++) {
for(j=gridsize/2-1;j<=gridsize/2;j++) {
grid[i*gridsize + j]=1;
}
}
}
void print(double* grid, int padded_grid_size, int time) {
printf("\n\n\n");
int i,j;
// we don't want to print the border!
for(i=1;i<padded_grid_size-1;i++) {
for(j=1;j<padded_grid_size-1;j++) {
printf("%.2f ",grid[i*padded_grid_size + j]);
}
printf("\n");
}
}
void save( FILE *f, double* grid, int padded_grid_size,int TIMESTEPS)
{
int i,j;
static int first = 1;
if( first )
{
fprintf( f, "# %d %d\n", TIMESTEPS, padded_grid_size-2 );
first = 0;
}
for(i = 1; i < padded_grid_size-1; i++ ) {
for(j=1; j < padded_grid_size-1; j++) {
fprintf( f, "%.g ", grid[i* padded_grid_size + j] );
}
fprintf(f,"\n");
}
}
//
// command line option processing
//
int find_option( int argc, char **argv, const char *option )
{
int i;
for( i = 1; i < argc; i++ )
if( strcmp( argv[i], option ) == 0 )
return i;
return -1;
}
int read_int( int argc, char **argv, const char *option, int default_value )
{
int iplace = find_option( argc, argv, option );
if( iplace >= 0 && iplace < argc-1 )
return atoi( argv[iplace+1] );
return default_value;
}
char *read_string( int argc, char **argv, const char *option, char *default_value )
{
int iplace = find_option( argc, argv, option );
if( iplace >= 0 && iplace < argc-1 )
return argv[iplace+1];
return default_value;
}
| 6528dbb87f4b7d0f5cb5f459d656f2523ba0e1cb.cu | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <mpi.h>
#include <math.h>
#define DEFAULT_GRIDSIZE 1024
// Save/ Print only every nth step:
#define PRINTSTEP 10
// TIMESTEPS is used below but never defined in the original; 100 is an assumed placeholder value
#define TIMESTEPS 100
void init_cells(double* grid, int gridsize);
void print(double* grid, int padded_grid_size, int time);
void save(FILE *f, double* grid, int padded_grid_size, int time);
int find_option( int argc, char **argv, const char *option );
int read_int( int argc, char **argv, const char *option, int default_value );
char *read_string( int argc, char **argv, const char *option, char *default_value );
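// Each of the four MPI ranks initialises its n_block x n_block quadrant of the global
// n x n matrix; the rank number selects the row/column offset of that quadrant.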
__global__ void init_matrix(float *A, int rank, int n_block = 512, int n = 1024) {
int i = threadIdx.y + blockIdx.y * (n_block / gridDim.y);
int end_i = i + (n_block / gridDim.y / blockDim.y);
if(rank == 2 || rank == 3) {
i += n_block;
end_i += n_block;
}
int j = threadIdx.x + blockIdx.x * (n_block / gridDim.x);
int end_j = j + (n_block / gridDim.x / blockDim.x);
if(rank == 0 || rank == 3) {
j += n_block;
end_j += n_block;
}
const int j_begin = j; // remember the starting column so the inner loop restarts on every row
for(; i < end_i; i++) {
for(j = j_begin; j < end_j; j++) {
if(i < n/2 && j < n/2 || n/2 >= i && n/2 >= j)
A[i*n+j] = 1;
else
A[i*n+j] = 0;
}
}
}
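// One Jacobi sweep over this rank's quadrant: each interior element becomes the average of its
// four neighbours; gt_eps records, per thread block, whether any element rose by more than eps
// (fdimf only measures the positive part of the difference).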
__global__ void jacobi_iteration(float *A_in, float *A_out,
bool *gt_eps, int rank, float eps = 0.01,
int n_block = 512, int n = 1024) {
__shared__ bool gt_eps_block; // shared variables cannot take an initialiser at declaration
if (threadIdx.x == 0 && threadIdx.y == 0) gt_eps_block = false;
__syncthreads();
int i = threadIdx.y + blockIdx.y * (n_block / gridDim.y);
int end_i = i + (n_block / gridDim.y / blockDim.y);
if(rank == 2 || rank == 3) {
i += n_block;
end_i += n_block;
}
int j = threadIdx.x + blockIdx.x * (n_block / gridDim.x);
int end_j = j + (n_block / gridDim.x / blockDim.x);
if(rank == 0 || rank == 3) {
j += n_block;
end_j += n_block;
}
const int j_begin = j; // restart the column index for every row
for(; i < end_i; i++) {
for(j = j_begin; j < end_j; j++) {
if (0 < i && i < n - 1 && 0 < j && j < n - 1)
A_out[i*n+j] = (A_in[(i-1)*n+j] + A_in[(i+1)*n+j] \
+ A_in[i*n+j-1] + A_in[i*n+j+1]) / 4.0;
if(fdimf(A_out[i*n+j], A_in[i*n+j]) > eps)
gt_eps_block = true;
}
}
__syncthreads(); // make every thread's update of gt_eps_block visible before it is written out
gt_eps[blockIdx.y * gridDim.x + blockIdx.x] = gt_eps_block;
}
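// Halo exchange between neighbouring ranks; left as an unfinished stub here
// (the MPI_Sendrecv call below carries no arguments).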
void exchange_borders(float *A, int rank, int n = 1024, int processes = 4) {
int before = abs((rank - 1) % 4);
int next = (rank + 1) % 4;
MPI_Sendrecv()
}
int main(int argc, char** argv) {
int numtasks, rank;
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &numtasks);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Status status;
cudaError_t err = cudaGetLastError();
if( find_option( argc, argv, "-h" ) >= 0 )
{
printf( "Options:\n" );
printf( "-h to see this help\n" );
printf( "-n <int> to set the grid size\n" );
printf( "-o <filename> to specify the output file name\n" );
return 1;
}
int GRIDSIZE = read_int( argc, argv, "-n", DEFAULT_GRIDSIZE );
// Check gridsize for some basic assumptions
if(GRIDSIZE != DEFAULT_GRIDSIZE) {
printf("Only Gridsize of 1024 is allowed!\n");
return 1;
}
char *savename = read_string( argc, argv, "-o", "sample_conduct.txt" );
FILE *f = savename ? fopen( savename, "w" ) : NULL;
if( f == NULL )
{
printf( "failed to open %s\n", savename );
return 1;
}
float *A_block;
float *A_block_tmp;
bool *gt_eps;
float *A;
cudaMallocManaged(&A_block, sizeof *A_block * 1024 * 1024);
cudaMallocManaged(&A_block_tmp, sizeof *A_block_tmp * 1024 * 1024);
init_matrix<<<1, 256>>>(A_block, rank);
err = cudaGetLastError(); // query the status of the launch above, not the stale value captured earlier
if (err != cudaSuccess)
fprintf(stderr, "Error: %s\n", cudaGetErrorString(err));
cudaDeviceSynchronize();
dim3 nb(4,4);
dim3 nt(1, 128);
cudaMallocManaged(&gt_eps, sizeof *gt_eps * 4 * 4);
while(**something**) {
jacobi_iteration<<<nb, nt>>>(A_block, A_block_tmp, gt_eps, rank);
}
double *T, *T_block, *Tn_block;
// Allocate Grid with a padding on every side for convenience
int padded_grid_size = GRIDSIZE+2;
int blocksize = GRIDSIZE / numtasks;
if(rank == 0) {
T=(double *) malloc((padded_grid_size)*(padded_grid_size)*sizeof(double));
// temp grid to hold the calculated data for the next time step
init_cells(T,padded_grid_size);
}
T_block = (double *) malloc(padded_grid_size * (blocksize+2) * sizeof(double));
Tn_block = (double *) malloc(padded_grid_size * (blocksize+2) * sizeof(double));
MPI_Scatter(&T[padded_grid_size], padded_grid_size * blocksize, MPI_DOUBLE, &T_block[padded_grid_size],
padded_grid_size * blocksize, MPI_DOUBLE, 0, MPI_COMM_WORLD);
// remember -- our grid has a border around it!
int last_row = padded_grid_size * (blocksize + 1);
int next_to_last_row = last_row - padded_grid_size;
for(int t=0;t<TIMESTEPS;t++) { // Loop for the time steps
if(rank > 0 && rank < numtasks-1) {
MPI_Sendrecv(&T_block[padded_grid_size], padded_grid_size, MPI_DOUBLE, rank-1, 42,
T_block, padded_grid_size, MPI_DOUBLE, rank-1, 42, MPI_COMM_WORLD, NULL);
MPI_Sendrecv(&T_block[next_to_last_row], padded_grid_size, MPI_DOUBLE, rank+1, 42,
&T_block[last_row], padded_grid_size, MPI_DOUBLE, rank+1, 42, MPI_COMM_WORLD, NULL);
} else if(rank == 0) {
MPI_Sendrecv(&T_block[next_to_last_row], padded_grid_size, MPI_DOUBLE, 1, 42, &T_block[last_row], padded_grid_size,
MPI_DOUBLE, 1, 42, MPI_COMM_WORLD, &status);
} else if(rank == numtasks-1) {
MPI_Sendrecv(&T_block[padded_grid_size], padded_grid_size, MPI_DOUBLE, rank-1, 42, T_block, padded_grid_size,
MPI_DOUBLE, rank-1, 42, MPI_COMM_WORLD, &status);
}
// Calculate grid cells for next timestep
for(int i=1; i<blocksize+1; i++) {
for(int j=1; j<padded_grid_size-1; j++) {
Tn_block[i*padded_grid_size + j] = (T_block[(i-1)*padded_grid_size+j] + T_block[i*padded_grid_size + (j-1)] \
+ T_block[i*padded_grid_size+(j+1)] + T_block[(i+1)*padded_grid_size+j]) / 4.0;
}
}
// copy new grid into old one
for(int i=1; i<blocksize+1; i++) {
for(int j=1; j<padded_grid_size-1; j++) {
T_block[i*padded_grid_size+j] = Tn_block[i*padded_grid_size+j];
}
}
if(!(t % PRINTSTEP)) {
MPI_Barrier(MPI_COMM_WORLD);
MPI_Gather(&T_block[padded_grid_size], blocksize*padded_grid_size, MPI_DOUBLE, &T[padded_grid_size],
blocksize*padded_grid_size, MPI_DOUBLE, 0, MPI_COMM_WORLD);
if(rank == 0)
save(f,T,padded_grid_size,TIMESTEPS);
}
}
fclose(f);
MPI_Finalize();
return 0;
}
void init_cells(double* grid, int gridsize) {
int i,j;
// set everything to zero, even the border
for(i=0;i<gridsize;i++) {
for(j=0;j<gridsize;j++) {
grid[i*gridsize + j]=0;
}
}
// but the most inner 4 cells
for(i=gridsize/2-1;i<=gridsize/2;i++) {
for(j=gridsize/2-1;j<=gridsize/2;j++) {
grid[i*gridsize + j]=1;
}
}
}
void print(double* grid, int padded_grid_size, int time) {
printf("\n\n\n");
int i,j;
// we don't want to print the border!
for(i=1;i<padded_grid_size-1;i++) {
for(j=1;j<padded_grid_size-1;j++) {
printf("%.2f ",grid[i*padded_grid_size + j]);
}
printf("\n");
}
}
void save( FILE *f, double* grid, int padded_grid_size,int TIMESTEPS)
{
int i,j;
static int first = 1;
if( first )
{
fprintf( f, "# %d %d\n", TIMESTEPS, padded_grid_size-2 );
first = 0;
}
for(i = 1; i < padded_grid_size-1; i++ ) {
for(j=1; j < padded_grid_size-1; j++) {
fprintf( f, "%.g ", grid[i* padded_grid_size + j] );
}
fprintf(f,"\n");
}
}
//
// command line option processing
//
int find_option( int argc, char **argv, const char *option )
{
int i;
for( i = 1; i < argc; i++ )
if( strcmp( argv[i], option ) == 0 )
return i;
return -1;
}
int read_int( int argc, char **argv, const char *option, int default_value )
{
int iplace = find_option( argc, argv, option );
if( iplace >= 0 && iplace < argc-1 )
return atoi( argv[iplace+1] );
return default_value;
}
char *read_string( int argc, char **argv, const char *option, char *default_value )
{
int iplace = find_option( argc, argv, option );
if( iplace >= 0 && iplace < argc-1 )
return argv[iplace+1];
return default_value;
}
|
396683a57d7a6b98417a06732e5811c8372be619.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
//Library Definition
//Constant Definition
#define PI 3.141592654
#define blocksize 32
#define Repetitions 8192
//Print matrix into standard output
void print(double * M,int cols,int rows);
void dot(double * a,double * b, double & c, int cols);
void Create_New_Matrix(double * M,double * New,int * vec, int p0, int pp,int nn);
/*
DEVICE FUNCTIONS
*/
//Element-wise weighted sum: Msum = alpha*M1 + beta*M2, column-major storage (rows x cols of M)
__global__ void matrixSum(const double * M1,const double * M2,double * Msum,double alpha,double beta, int rows, int cols)
{
int row = blockIdx.x * blockDim.x + threadIdx.x;
int col = blockIdx.y * blockDim.y + threadIdx.y;
if (row < rows && col < cols)
{
Msum[row + col*rows] = alpha*M1[row+col*rows]+beta*M2[row+col*rows];
}
} | 396683a57d7a6b98417a06732e5811c8372be619.cu | #include "includes.h"
//Library Definition
//Constant Definition
#define PI 3.141592654
#define blocksize 32
#define Repetitions 8192
//Print matrix into standard output
void print(double * M,int cols,int rows);
void dot(double * a,double * b, double & c, int cols);
void Create_New_Matrix(double * M,double * New,int * vec, int p0, int pp,int nn);
/*
DEVICE FUNCTIONS
*/
//Element-wise weighted sum: Msum = alpha*M1 + beta*M2, column-major storage (rows x cols of M)
__global__ void matrixSum(const double * M1,const double * M2,double * Msum,double alpha,double beta, int rows, int cols)
{
int row = blockIdx.x * blockDim.x + threadIdx.x;
int col = blockIdx.y * blockDim.y + threadIdx.y;
if (row < rows && col < cols)
{
Msum[row + col*rows] = alpha*M1[row+col*rows]+beta*M2[row+col*rows];
}
} |
f9de5aa26da63f9c4509e4231e1a7c62f9cde77b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define BOOST_TEST_MODULE example
#include <assert.h>
#include <boost/functional/hash.hpp>
#include <boost/make_shared.hpp>
#include <boost/shared_ptr.hpp>
#include <boost/test/included/unit_test.hpp>
#include <cuv/ndarray.hpp>
#include "random_tree_image_gpu.h"
#include "random_tree_image.h"
#include "score.h"
#include "test_common.h"
#include "utils.h"
using namespace curfil;
#define DUMP_IMAGE 0
static const int SEED = 4711;
class Fixture {
public:
Fixture() {
clearImageCache();
}
};
BOOST_FIXTURE_TEST_SUITE(RandomTreeImageGPUTest, Fixture)
template<class W>
__global__
static void calculcateScoreKernel(ScoreType* result, const size_t numClasses,
const W* leftClasses, const W* rightClasses, const unsigned int leftRightStride,
const W* allClasses, const ScoreType totalLeft, const ScoreType totalRight) {
ScoreType score = NormalizedInformationGainScore::calculateScore(numClasses, leftClasses, rightClasses,
leftRightStride,
allClasses, totalLeft, totalRight);
*result = score;
}
static ScoreType scoreOnGPU(const size_t numClasses, const cuv::ndarray<int, cuv::host_memory_space>& leftClasses,
const cuv::ndarray<int, cuv::host_memory_space>& rightClasses,
const cuv::ndarray<int, cuv::host_memory_space>& allClasses,
const ScoreType totalLeft, const ScoreType totalRight) {
cuv::ndarray<ScoreType, cuv::dev_memory_space> result(1);
const cuv::ndarray<int, cuv::dev_memory_space> leftClassesDevice(leftClasses);
const cuv::ndarray<int, cuv::dev_memory_space> rightClassesDevice(rightClasses);
const cuv::ndarray<int, cuv::dev_memory_space> allClassesDevice(allClasses);
const unsigned int leftRightStride = leftClassesDevice.stride(0);
BOOST_REQUIRE_EQUAL(leftRightStride, rightClassesDevice.stride(0));
hipLaunchKernelGGL(( calculcateScoreKernel), dim3(1),dim3(1), 0, 0, result.ptr(), numClasses, leftClassesDevice.ptr(), rightClassesDevice.ptr(),
leftRightStride, allClassesDevice.ptr(), totalLeft, totalRight);
cudaSafeCall(hipDeviceSynchronize());
double res = result[0];
return res;
}
static ScoreType scoreOnGPU(const size_t size, const WeightType* leftClass, const WeightType* rightClass,
const WeightType* allClasses, const ScoreType totalLeft, const ScoreType totalRight) {
cuv::ndarray<int, cuv::host_memory_space> leftClassArray(size);
cuv::ndarray<int, cuv::host_memory_space> rightClassArray(size);
cuv::ndarray<int, cuv::host_memory_space> allClassesArray(size);
for (size_t i = 0; i < size; i++) {
leftClassArray[i] = leftClass[i];
rightClassArray[i] = rightClass[i];
allClassesArray[i] = allClasses[i];
}
return scoreOnGPU(size, leftClassArray, rightClassArray, allClassesArray, totalLeft, totalRight);
}
BOOST_AUTO_TEST_CASE(testInformationGainScore) {
const int numClasses = 2;
cuv::ndarray<int, cuv::host_memory_space> left(numClasses);
cuv::ndarray<int, cuv::host_memory_space> right(numClasses);
cuv::ndarray<int, cuv::host_memory_space> allClass(numClasses);
for (size_t num = 1; num < 10; num++) {
// best case scenario: score=0
left[0] = num;
right[0] = num;
left[1] = num;
right[1] = num;
allClass[0] = 2 * num;
allClass[1] = 2 * num;
ScoreType totalLeft = 2 * num;
ScoreType totalRight = 2 * num;
BOOST_REQUIRE_EQUAL(left.stride(0), 1);
ScoreType score = NormalizedInformationGainScore::calculateScore(numClasses, left.ptr(), right.ptr(),
left.stride(0), allClass.ptr(), totalLeft, totalRight);
BOOST_CHECK_CLOSE(0, score, 0);
BOOST_CHECK_CLOSE(score, scoreOnGPU(numClasses, left, right, allClass, totalLeft, totalRight), 1e-6);
// best case scenario: score=1
left[0] = 0;
right[0] = 2 * num;
left[1] = 2 * num;
right[1] = 0;
allClass[0] = 2 * num;
allClass[1] = 2 * num;
totalLeft = 2 * num;
totalRight = 2 * num;
BOOST_REQUIRE_EQUAL(left.stride(0), 1);
score = NormalizedInformationGainScore::calculateScore(numClasses, left.ptr(), right.ptr(),
left.stride(0), allClass.ptr(), totalLeft, totalRight);
BOOST_CHECK_CLOSE(1, score, 0);
BOOST_CHECK_CLOSE(score, scoreOnGPU(numClasses, left, right, allClass, totalLeft, totalRight), 1e-6);
}
left[0] = 5;
right[0] = 3;
left[1] = 8;
right[1] = 1;
allClass[0] = 8;
allClass[1] = 9;
double totalLeft = left[0] + left[1];
double totalRight = right[0] + right[1];
BOOST_REQUIRE_EQUAL(left.stride(0), 1);
ScoreType score1 = NormalizedInformationGainScore::calculateScore(numClasses, left.ptr(), right.ptr(),
left.stride(0), allClass.ptr(), totalLeft, totalRight);
BOOST_CHECK_CLOSE(0.080185, score1, 1e-4);
BOOST_CHECK_CLOSE(score1, scoreOnGPU(numClasses, left, right, allClass, totalLeft, totalRight), 1e-5);
left[0] = 2;
right[0] = 6;
left[1] = 8;
right[1] = 1;
totalLeft = left[0] + left[1];
totalRight = right[0] + right[1];
BOOST_REQUIRE_EQUAL(left.stride(0), 1);
ScoreType score2 = NormalizedInformationGainScore::calculateScore(numClasses, left.ptr(), right.ptr(),
left.stride(0), allClass.ptr(), totalLeft, totalRight);
BOOST_CHECK_GT(score2, score1);
BOOST_CHECK_CLOSE(0.33339, score2, 1e-3);
BOOST_CHECK_CLOSE(score2, scoreOnGPU(numClasses, left, right, allClass, totalLeft, totalRight), 1e-6);
// case 1 (a real case)
// histogram: [ 86 241 291 3 267 ]
// histogram left: [ 56 241 290 3 18 ]
// histogram right: [ 30 0 1 0 249 ]
{
const size_t size = 5;
const WeightType all[] = { 86, 241, 291, 3, 267 };
const WeightType left[] = { 56, 241, 290, 3, 18 };
const WeightType right[] = { 30, 0, 1, 0, 249 };
const unsigned int leftRightStride = 1;
const size_t totalLeft = std::accumulate(left, left + size, 0);
const size_t totalRight = std::accumulate(right, right + size, 0);
BOOST_REQUIRE_EQUAL(totalLeft + totalRight, std::accumulate(all, all + size, 0));
ScoreType score = NormalizedInformationGainScore::calculateScore(size, left, right,
leftRightStride, all, totalLeft, totalRight);
BOOST_CHECK_CLOSE(score, 0.491311, 1e-3);
BOOST_CHECK_CLOSE(score, scoreOnGPU(size, left, right, all, totalLeft, totalRight), 1e-6);
score = InformationGainScore::calculateScore(size, left, right,
leftRightStride, all, totalLeft, totalRight);
BOOST_CHECK_CLOSE(score, 0.690912, 1e-3);
}
{
// case 2 (constructed, obviously)
// histogram: [ 50 100 50 0 100 ]
// histogram left: [ 50 100 0 0 0 ]
// histogram right: [ 0 0 50 0 100 ]
const size_t size = 5;
const WeightType all[] = { 50, 100, 50, 0, 100 };
const WeightType left[] = { 50, 100, 0, 0, 0 };
const WeightType right[] = { 0, 0, 50, 0, 100 };
const unsigned int leftRightStride = 1;
const size_t totalLeft = std::accumulate(left, left + size, 0);
const size_t totalRight = std::accumulate(right, right + size, 0);
ScoreType score = NormalizedInformationGainScore::calculateScore(size, left, right, leftRightStride,
all, totalLeft, totalRight);
BOOST_CHECK_CLOSE(score, 0.68533, 1e-3);
score = InformationGainScore::calculateScore(size, left, right, leftRightStride,
all, totalLeft, totalRight);
BOOST_CHECK_CLOSE(score, 1.0, 1e-3);
}
}
template<class T>
static void updateHash(const boost::hash<size_t>& hasher, size_t& hash, const T& value) {
// extract from boost headers
hash ^= hasher(value) + 0x9e3779b9 + (hash << 6) + (hash >> 2);
}
template<class T>
static size_t checkScores(const cuv::ndarray<ScoreType, cuv::host_memory_space>& scores, T numFeatures,
T numThresholds) {
BOOST_CHECK_EQUAL(2, static_cast<int>(scores.ndim()));
BOOST_CHECK_EQUAL(static_cast<size_t>(numThresholds), static_cast<size_t>(scores.shape(0)));
BOOST_CHECK_EQUAL(static_cast<size_t>(numFeatures), static_cast<size_t>(scores.shape(1)));
size_t hash = 0;
boost::hash<size_t> hasher;
for (T feat = 0; feat < numFeatures; feat++) {
for (T thresh = 0; thresh < numThresholds; thresh++) {
const ScoreType score = scores(thresh, feat);
BOOST_CHECK_GE(score, 0.0);
BOOST_CHECK_LE(score, 1.0);
updateHash(hasher, hash, score);
}
}
return hash;
}
static size_t checkCounters(TrainingConfiguration& configuration,
const cuv::ndarray<WeightType, cuv::dev_memory_space> countersDevice,
const std::vector<PixelInstance>& samples) {
const cuv::ndarray<WeightType, cuv::host_memory_space> counters(countersDevice);
size_t hash = 0;
boost::hash<size_t> hasher;
std::map<size_t, size_t> samplesPerLabel;
for (size_t sample = 0; sample < samples.size(); sample++) {
samplesPerLabel[samples[sample].getLabel()]++;
}
size_t numLabels = samplesPerLabel.size();
assert(numLabels > 0);
const size_t features = configuration.getFeatureCount();
const size_t thresholds = configuration.getThresholds();
BOOST_CHECK_EQUAL(4, static_cast<int>(counters.ndim()));
BOOST_CHECK_EQUAL(features, static_cast<size_t>(counters.shape(0)));
BOOST_CHECK_EQUAL(thresholds, static_cast<size_t>(counters.shape(1)));
BOOST_CHECK_EQUAL(numLabels, static_cast<size_t>(counters.shape(2)));
BOOST_CHECK_EQUAL(2lu, static_cast<size_t>(counters.shape()[3]));
for (size_t label = 0; label < numLabels; label++) {
for (size_t thresh = 0; thresh < thresholds; thresh++) {
for (size_t feat = 0; feat < features; feat++) {
const size_t left = counters(feat, thresh, label, 0);
const size_t right = counters(feat, thresh, label, 1);
const size_t numSamples = samplesPerLabel[label];
BOOST_CHECK_EQUAL(numSamples, left + right);
updateHash(hasher, hash, left);
updateHash(hasher, hash, right);
}
}
}
return hash;
}
BOOST_AUTO_TEST_CASE(testDepthFeatureSimple) {
const int NUM_FEAT = 1;
const int NUM_THRESH = 100;
unsigned int samplesPerImage = 500;
unsigned int minSampleCount = 32;
int maxDepth = 15;
uint16_t boxRadius = 127;
uint16_t regionSize = 16;
static const int NUM_THREADS = 1;
static const int maxImages = 0;
static const int imageCacheSize = 1;
unsigned int maxSamplesPerBatch = 5000;
AccelerationMode accelerationMode = GPU_AND_CPU_COMPARE;
TrainingConfiguration configuration(SEED, samplesPerImage, NUM_FEAT, minSampleCount, maxDepth, boxRadius,
regionSize, NUM_THRESH, NUM_THREADS, maxImages, imageCacheSize, maxSamplesPerBatch, accelerationMode);
ImageFeatureEvaluation featureFunction(0, configuration);
static const int width = 16;
static const int height = 20;
std::vector<RGBDImage> images(1, RGBDImage(width, height));
std::vector<PixelInstance> samples;
ImageFeaturesAndThresholds<cuv::dev_memory_space> featuresAndThresholds(NUM_FEAT, NUM_THRESH,
boost::make_shared<cuv::default_allocator>());
for (int i = 0; i < NUM_THRESH; i++) {
featuresAndThresholds.thresholds()(i, 0) = (i - 50) / 10.0f;
}
featuresAndThresholds.types()[0] = DEPTH;
featuresAndThresholds.offset1X()[0] = 1;
featuresAndThresholds.offset1Y()[0] = 1;
featuresAndThresholds.region1X()[0] = 2;
featuresAndThresholds.region1Y()[0] = 2;
featuresAndThresholds.offset2X()[0] = -3;
featuresAndThresholds.offset2Y()[0] = -1;
featuresAndThresholds.region2X()[0] = 1;
featuresAndThresholds.region2Y()[0] = 1;
featuresAndThresholds.channel1()[0] = 0;
featuresAndThresholds.channel2()[0] = 0;
const int NUM_LABELS = 2;
samples.push_back(PixelInstance(&images[0], 0, Depth(1.0), 6, 3));
samples.push_back(PixelInstance(&images[0], 1, Depth(1.0), 6, 3));
RandomTree<PixelInstance, ImageFeatureFunction> node(0, 0, getPointers(samples), NUM_LABELS);
cuv::ndarray<WeightType, cuv::dev_memory_space> histogram(node.getHistogram());
{
std::vector<std::vector<const PixelInstance*> > batches = featureFunction.prepare(getPointers(samples), node,
cuv::dev_memory_space(), false);
cuv::ndarray<FeatureResponseType, cuv::host_memory_space> featureResponses;
cuv::ndarray<WeightType, cuv::dev_memory_space> counters =
featureFunction.calculateFeatureResponsesAndHistograms(node, batches, featuresAndThresholds,
&featureResponses);
BOOST_CHECK(isnan(static_cast<FeatureResponseType>(featureResponses(0, 0))));
cuv::ndarray<ScoreType, cuv::host_memory_space> scores(featureFunction.calculateScores(counters,
featuresAndThresholds, histogram));
checkScores(scores, NUM_FEAT, NUM_THRESH);
}
images[0].reset();
clearImageCache();
images[0].setDepth(7, 3, Depth(1.5f));
images[0].setDepth(7, 4, Depth(1.7f));
images[0].setDepth(8, 4, Depth(4.5f));
images[0].setDepth(3, 2, Depth(3.9f));
#if DUMP_IMAGE
image.dumpDepth(std::cout);
#endif
images[0].calculateIntegral();
#if DUMP_IMAGE
CURFIL_INFO("integral:");
image.dumpDepth(std::cout);
#endif
{
std::vector<std::vector<const PixelInstance*> > batches = featureFunction.prepare(getPointers(samples),
node, cuv::dev_memory_space(), false);
cuv::ndarray<FeatureResponseType, cuv::host_memory_space> featureResponses;
cuv::ndarray<WeightType, cuv::dev_memory_space> counters =
featureFunction.calculateFeatureResponsesAndHistograms(node, batches, featuresAndThresholds,
&featureResponses);
cuv::ndarray<ScoreType, cuv::host_memory_space> scores(featureFunction.calculateScores(counters,
featuresAndThresholds, histogram));
checkCounters(configuration, counters, samples);
checkScores(scores, NUM_FEAT, NUM_THRESH);
BOOST_CHECK_CLOSE((1.5 + 1.7 + 4.5) / 3 - 3.9, static_cast<FeatureResponseType>(featureResponses(0, 0)), 1e-6);
for (int label = 0; label < NUM_LABELS; label++) {
for (int thresh = 0; thresh < NUM_THRESH; thresh++) {
for (int feat = 0; feat < NUM_FEAT; feat++) {
BOOST_CHECK_EQUAL(1,
static_cast<int>(counters(feat, thresh, label, 0) + counters(feat, thresh, label, 1)));
BOOST_CHECK_EQUAL(static_cast<unsigned int>(thresh >= 37),
static_cast<unsigned int>(counters(feat, thresh, label, 0)));
}
}
}
images[0].reset();
clearImageCache();
for (int y = 0; y < height; y++) {
for (int x = 0; x < width; x++) {
images[0].setDepth(x, y, Depth(1.0f));
}
}
images[0].calculateIntegral();
#if DUMP_IMAGE
image.dumpDepth(std::cout);
#endif
{
std::vector<std::vector<const PixelInstance*> > batches = featureFunction.prepare(getPointers(samples),
node, cuv::dev_memory_space(), false);
cuv::ndarray<FeatureResponseType, cuv::host_memory_space> featureResponses;
cuv::ndarray<WeightType, cuv::dev_memory_space> counters =
featureFunction.calculateFeatureResponsesAndHistograms(node, batches, featuresAndThresholds,
&featureResponses);
cuv::ndarray<ScoreType, cuv::host_memory_space> scores(featureFunction.calculateScores(counters,
featuresAndThresholds, histogram));
BOOST_CHECK_CLOSE(0, static_cast<FeatureResponseType>(featureResponses(0, 0)), 0);
checkScores(scores, NUM_FEAT, NUM_THRESH);
checkCounters(configuration, counters, samples);
for (int label = 0; label < NUM_LABELS; label++) {
for (int thresh = 0; thresh < 100; thresh++) {
for (int feat = 0; feat < NUM_FEAT; feat++) {
BOOST_CHECK_EQUAL(1,
static_cast<int>(counters(feat, thresh, label, 0) + counters(feat, thresh, label, 1)));
BOOST_CHECK_EQUAL(static_cast<unsigned int>(thresh >= 50),
static_cast<unsigned int>(counters(feat, thresh, label, 0)));
}
}
}
}
}
}
BOOST_AUTO_TEST_CASE(testColorFeatureSimple) {
unsigned int samplesPerImage = 500;
unsigned int NUM_FEAT = 1;
unsigned int minSampleCount = 32;
int maxDepth = 15;
uint16_t boxRadius = 127;
uint16_t regionSize = 16;
static const uint16_t NUM_THRESH = 3;
static const int NUM_THREADS = 1;
static const int maxImages = 0;
static const int imageCacheSize = 1;
unsigned int maxSamplesPerBatch = 5000;
AccelerationMode accelerationMode = GPU_AND_CPU_COMPARE;
TrainingConfiguration configuration(SEED, samplesPerImage, NUM_FEAT, minSampleCount, maxDepth, boxRadius,
regionSize, NUM_THRESH, NUM_THREADS, maxImages, imageCacheSize, maxSamplesPerBatch, accelerationMode);
ImageFeatureEvaluation featureFunction(0, configuration);
static const int width = 16;
static const int height = 20;
std::vector<RGBDImage> images(1, RGBDImage(width, height));
std::vector<PixelInstance> samples;
ImageFeaturesAndThresholds<cuv::dev_memory_space> featuresAndThresholds(NUM_FEAT, NUM_THRESH,
boost::make_shared<cuv::default_allocator>());
featuresAndThresholds.types()[0] = COLOR;
featuresAndThresholds.offset1X()[0] = 3;
featuresAndThresholds.offset1Y()[0] = -2;
featuresAndThresholds.region1X()[0] = 4;
featuresAndThresholds.region1Y()[0] = 2;
featuresAndThresholds.offset2X()[0] = -3;
featuresAndThresholds.offset2Y()[0] = -2;
featuresAndThresholds.region2X()[0] = 1;
featuresAndThresholds.region2Y()[0] = 2;
featuresAndThresholds.channel1()[0] = 0;
featuresAndThresholds.channel2()[0] = 0;
featuresAndThresholds.thresholds()(0, 0) = -1.0f;
featuresAndThresholds.thresholds()(1, 0) = 0.0f;
featuresAndThresholds.thresholds()(2, 0) = 1.0f;
const int NUM_LABELS = 2;
samples.push_back(PixelInstance(&images[0], 0, Depth(1.0), 6, 4));
samples.push_back(PixelInstance(&images[0], 1, Depth(1.0), 6, 4));
RandomTree<PixelInstance, ImageFeatureFunction> node(0, 0, getPointers(samples), NUM_LABELS);
cuv::ndarray<WeightType, cuv::dev_memory_space> histogram(node.getHistogram());
{
std::vector<std::vector<const PixelInstance*> > batches = featureFunction.prepare(getPointers(samples), node,
cuv::dev_memory_space(), false);
cuv::ndarray<FeatureResponseType, cuv::host_memory_space> featureResponses;
cuv::ndarray<WeightType, cuv::dev_memory_space> counters =
featureFunction.calculateFeatureResponsesAndHistograms(node, batches, featuresAndThresholds,
&featureResponses);
BOOST_CHECK_CLOSE(0, static_cast<FeatureResponseType>(featureResponses(0, 0)), 0);
checkCounters(configuration, counters, samples);
assert(static_cast<int>(samples.size()) == NUM_LABELS);
for (size_t label = 0; label < samples.size(); label++) {
BOOST_CHECK_EQUAL(0, static_cast<int>(counters(0, 0, label, 0)));
BOOST_CHECK_EQUAL(1, static_cast<int>(counters(0, 0, label, 1)));
BOOST_CHECK_EQUAL(1, static_cast<int>(counters(0, 1, label, 0)));
BOOST_CHECK_EQUAL(0, static_cast<int>(counters(0, 1, label, 1)));
BOOST_CHECK_EQUAL(1, static_cast<int>(counters(0, 2, label, 0)));
BOOST_CHECK_EQUAL(0, static_cast<int>(counters(0, 2, label, 1)));
}
}
images[0].reset();
clearImageCache();
images[0].setColor(7, 2, 0, 0.5f);
#if DUMP_IMAGE
image.dump(std::cout);
#endif
images[0].calculateIntegral();
#if DUMP_IMAGE
std::cout << "integral" << std::endl;
image.dump(std::cout);
#endif
{
std::vector<std::vector<const PixelInstance*> > batches = featureFunction.prepare(getPointers(samples),
node, cuv::dev_memory_space(), false);
cuv::ndarray<FeatureResponseType, cuv::host_memory_space> featureResponses;
cuv::ndarray<WeightType, cuv::dev_memory_space> counters =
featureFunction.calculateFeatureResponsesAndHistograms(node, batches, featuresAndThresholds,
&featureResponses);
checkCounters(configuration, counters, samples);
BOOST_CHECK_CLOSE(0.5, static_cast<FeatureResponseType>(featureResponses(0, 0)), 0);
for (size_t label = 0; label < samples.size(); label++) {
BOOST_CHECK_EQUAL(0, static_cast<int>(counters(0, 0, label, 0)));
BOOST_CHECK_EQUAL(1, static_cast<int>(counters(0, 0, label, 1)));
BOOST_CHECK_EQUAL(0, static_cast<int>(counters(0, 1, label, 0)));
BOOST_CHECK_EQUAL(1, static_cast<int>(counters(0, 1, label, 1)));
BOOST_CHECK_EQUAL(1, static_cast<int>(counters(0, 2, label, 0)));
BOOST_CHECK_EQUAL(0, static_cast<int>(counters(0, 2, label, 1)));
}
}
images[0].reset();
clearImageCache();
for (int y = 0; y < height; y++) {
for (int x = 0; x < width; x++) {
images[0].setColor(x, y, 0, 1.0f);
}
}
images[0].calculateIntegral();
#if DUMP_IMAGE
image.dump(std::cout);
#endif
{
std::vector<std::vector<const PixelInstance*> > batches = featureFunction.prepare(getPointers(samples),
node, cuv::dev_memory_space(), false);
cuv::ndarray<FeatureResponseType, cuv::host_memory_space> featureResponses;
cuv::ndarray<WeightType, cuv::dev_memory_space> counters =
featureFunction.calculateFeatureResponsesAndHistograms(node, batches, featuresAndThresholds,
&featureResponses);
BOOST_CHECK_CLOSE(8 * 4 - 4 * 2.0, static_cast<FeatureResponseType>(featureResponses(0, 0)), 0);
checkCounters(configuration, counters, samples);
for (size_t label = 0; label < samples.size(); label++) {
BOOST_CHECK_EQUAL(0, static_cast<int>(counters(0, 0, label, 0)));
BOOST_CHECK_EQUAL(1, static_cast<int>(counters(0, 0, label, 1)));
BOOST_CHECK_EQUAL(0, static_cast<int>(counters(0, 1, label, 0)));
BOOST_CHECK_EQUAL(1, static_cast<int>(counters(0, 1, label, 1)));
BOOST_CHECK_EQUAL(0, static_cast<int>(counters(0, 2, label, 0)));
BOOST_CHECK_EQUAL(1, static_cast<int>(counters(0, 2, label, 1)));
}
}
}
BOOST_AUTO_TEST_CASE(testColorFeatureComplex) {
const size_t NUM_THRESH = 2;
const size_t NUM_FEAT = 3;
const int maxImages = 5;
const int imageCacheSize = 5; // make sure the cache is at least as big as #images
unsigned int samplesPerImage = 500;
unsigned int minSampleCount = 32;
int maxDepth = 15;
uint16_t boxRadius = 127;
uint16_t regionSize = 16;
static const int NUM_THREADS = 1;
unsigned int maxSamplesPerBatch = 5000;
AccelerationMode accelerationMode = GPU_AND_CPU_COMPARE;
TrainingConfiguration configuration(SEED, samplesPerImage, NUM_FEAT, minSampleCount, maxDepth, boxRadius,
regionSize, NUM_THRESH, NUM_THREADS, maxImages, imageCacheSize, maxSamplesPerBatch, accelerationMode);
ImageFeatureEvaluation featureFunction(0, configuration);
const int width = 12;
const int height = 15;
std::vector<RGBDImage> images(2, RGBDImage(width, height));
std::vector<PixelInstance> samples;
for (int y = 0; y < height; y++) {
for (int x = 0; x < width; x++) {
for (int c = 0; c < 3; c++) {
float v = 100 * c + y * width + x;
images[0].setColor(x, y, c, v);
images[1].setColor(x, y, c, v / 2.0f);
}
}
}
BOOST_CHECK_CLOSE(179, images[0].getColor(width - 1, height - 1, 0), 0);
BOOST_CHECK_CLOSE(279, images[0].getColor(width - 1, height - 1, 1), 0);
BOOST_CHECK_CLOSE(379, images[0].getColor(width - 1, height - 1, 2), 0);
BOOST_CHECK_CLOSE(179 / 2.0, images[1].getColor(width - 1, height - 1, 0), 0);
BOOST_CHECK_CLOSE(279 / 2.0, images[1].getColor(width - 1, height - 1, 1), 0);
BOOST_CHECK_CLOSE(379 / 2.0, images[1].getColor(width - 1, height - 1, 2), 0);
images[0].calculateIntegral();
images[1].calculateIntegral();
BOOST_CHECK_CLOSE(16110, images[0].getColor(width - 1, height - 1, 0), 0);
BOOST_CHECK_CLOSE(34110, images[0].getColor(width - 1, height - 1, 1), 0);
BOOST_CHECK_CLOSE(52110, images[0].getColor(width - 1, height - 1, 2), 0);
BOOST_CHECK_CLOSE(16110 / 2.0, images[1].getColor(width - 1, height - 1, 0), 0);
BOOST_CHECK_CLOSE(34110 / 2.0, images[1].getColor(width - 1, height - 1, 1), 0);
BOOST_CHECK_CLOSE(52110 / 2.0, images[1].getColor(width - 1, height - 1, 2), 0);
ImageFeaturesAndThresholds<cuv::dev_memory_space> featuresAndThresholds(NUM_FEAT, NUM_THRESH,
boost::make_shared<cuv::default_allocator>());
featuresAndThresholds.types()[0] = COLOR;
featuresAndThresholds.offset1X()[0] = 2;
featuresAndThresholds.offset1Y()[0] = -1;
featuresAndThresholds.region1X()[0] = 2;
featuresAndThresholds.region1Y()[0] = 1;
featuresAndThresholds.offset2X()[0] = -3;
featuresAndThresholds.offset2Y()[0] = 4;
featuresAndThresholds.region2X()[0] = 1;
featuresAndThresholds.region2Y()[0] = 2;
featuresAndThresholds.channel1()[0] = 0;
featuresAndThresholds.channel2()[0] = 2;
featuresAndThresholds.types()[1] = COLOR;
featuresAndThresholds.offset1X()[1] = 2;
featuresAndThresholds.offset1Y()[1] = -1;
featuresAndThresholds.region1X()[1] = 2;
featuresAndThresholds.region1Y()[1] = 2;
featuresAndThresholds.offset2X()[1] = -3;
featuresAndThresholds.offset2Y()[1] = 4;
featuresAndThresholds.region2X()[1] = 1;
featuresAndThresholds.region2Y()[1] = 1;
featuresAndThresholds.channel1()[1] = 1;
featuresAndThresholds.channel2()[1] = 2;
featuresAndThresholds.types()[2] = COLOR;
featuresAndThresholds.offset1X()[2] = -2;
featuresAndThresholds.offset1Y()[2] = 1;
featuresAndThresholds.region1X()[2] = 3;
featuresAndThresholds.region1Y()[2] = 1;
featuresAndThresholds.offset2X()[2] = 3;
featuresAndThresholds.offset2Y()[2] = -4;
featuresAndThresholds.region2X()[2] = 3;
featuresAndThresholds.region2Y()[2] = 3;
featuresAndThresholds.channel1()[2] = 1;
featuresAndThresholds.channel2()[2] = 0;
featuresAndThresholds.thresholds()(0, 0) = 0.0f;
featuresAndThresholds.thresholds()(1, 0) = -500.0f;
featuresAndThresholds.thresholds()(0, 1) = -300.0f;
featuresAndThresholds.thresholds()(1, 1) = 0.0f;
featuresAndThresholds.thresholds()(0, 2) = 0.0f;
featuresAndThresholds.thresholds()(1, 2) = 500.0f;
const int NUM_LABELS = 2;
samples.push_back(PixelInstance(&images[0], 0, Depth(1.0), 6, 4));
samples.push_back(PixelInstance(&images[1], 0, Depth(2.0), 6, 4));
samples.push_back(PixelInstance(&images[0], 1, Depth(1.5), 5, 5));
samples.push_back(PixelInstance(&images[1], 1, Depth(3.1), 3, 4));
RandomTree<PixelInstance, ImageFeatureFunction> node(0, 0, getPointers(samples), NUM_LABELS);
cuv::ndarray<WeightType, cuv::dev_memory_space> histogram(node.getHistogram());
// 2 images, 3 features, 4 samples
std::vector<std::vector<const PixelInstance*> > batches = featureFunction.prepare(getPointers(samples),
node, cuv::dev_memory_space(), false);
cuv::ndarray<FeatureResponseType, cuv::host_memory_space> featureResponses;
cuv::ndarray<WeightType, cuv::dev_memory_space> counters =
featureFunction.calculateFeatureResponsesAndHistograms(node, batches, featuresAndThresholds,
&featureResponses);
cuv::ndarray<ScoreType, cuv::host_memory_space> scores(
featureFunction.calculateScores(counters,
featuresAndThresholds, histogram));
BOOST_CHECK_EQUAL(2, static_cast<int>(featureResponses.ndim()));
BOOST_CHECK_EQUAL(3, static_cast<int>(featureResponses.shape(0)));
BOOST_CHECK_EQUAL(4, static_cast<int>(featureResponses.shape(1)));
checkScores(scores, NUM_FEAT, NUM_THRESH);
// values verified by manual calculation
// sample 0, feat 0
BOOST_CHECK_CLOSE(-2040, static_cast<FeatureResponseType>(featureResponses(0, 0)), 0);
// sample 0, feat 1
BOOST_CHECK_CLOSE(1186, static_cast<FeatureResponseType>(featureResponses(1, 0)), 0);
// sample 0, feat 2
BOOST_CHECK(isnan(static_cast<FeatureResponseType>(featureResponses(2, 0))));
// sample 1, feat 0
BOOST_CHECK_CLOSE(-444, static_cast<FeatureResponseType>(featureResponses(0, 1)), 0);
// sample 1, feat 1
BOOST_CHECK_CLOSE(-244, static_cast<FeatureResponseType>(featureResponses(1, 1)), 0);
// sample 1, feat 2
BOOST_CHECK_CLOSE(244, static_cast<FeatureResponseType>(featureResponses(2, 1)), 0);
// sample 2, feat 0
BOOST_CHECK_CLOSE(-884, static_cast<FeatureResponseType>(featureResponses(0, 2)), 0);
// sample 2, feat 1
BOOST_CHECK_CLOSE(-484, static_cast<FeatureResponseType>(featureResponses(1, 2)), 0);
// sample 2, feat 2
BOOST_CHECK_CLOSE(572, static_cast<FeatureResponseType>(featureResponses(2, 2)), 0);
// sample 3, feat 0
BOOST_CHECK_CLOSE(-424, static_cast<FeatureResponseType>(featureResponses(0, 3)), 0);
// sample 3, feat 1
BOOST_CHECK_CLOSE(-224, static_cast<FeatureResponseType>(featureResponses(1, 3)), 0);
// sample 3, feat 2
BOOST_CHECK_CLOSE(224, static_cast<FeatureResponseType>(featureResponses(2, 3)), 0);
checkCounters(configuration, counters, samples);
// -2040 sample 0, feat 0 0
// -444 sample 1, feat 0 0
// -884 sample 2, feat 0 1
// -424 sample 3, feat 0 1
// 1186 sample 0, feat 1 0
// -244 sample 1, feat 1 0
// -484 sample 2, feat 1 1
// -224 sample 3, feat 1 1
// 1551 sample 0, feat 2 0
// 244 sample 1, feat 2 0
// 572 sample 2, feat 2 1
// 224 sample 3, feat 2 1
// feat 0, thresh 0 (0.0f)
BOOST_CHECK_EQUAL(2, static_cast<int>(counters(0, 0, 0, 0)));
BOOST_CHECK_EQUAL(0, static_cast<int>(counters(0, 0, 0, 1)));
BOOST_CHECK_EQUAL(2, static_cast<int>(counters(0, 0, 1, 0)));
BOOST_CHECK_EQUAL(0, static_cast<int>(counters(0, 0, 1, 1)));
// feat 0, thresh 1 (-500.0f)
BOOST_CHECK_EQUAL(1, static_cast<int>(counters(0, 1, 0, 0)));
BOOST_CHECK_EQUAL(1, static_cast<int>(counters(0, 1, 0, 1)));
BOOST_CHECK_EQUAL(1, static_cast<int>(counters(0, 1, 1, 0)));
BOOST_CHECK_EQUAL(1, static_cast<int>(counters(0, 1, 1, 1)));
// feat 1, thresh 0 (-300.0f)
BOOST_CHECK_EQUAL(0, static_cast<int>(counters(1, 0, 0, 0)));
BOOST_CHECK_EQUAL(2, static_cast<int>(counters(1, 0, 0, 1)));
BOOST_CHECK_EQUAL(1, static_cast<int>(counters(1, 0, 1, 0)));
BOOST_CHECK_EQUAL(1, static_cast<int>(counters(1, 0, 1, 1)));
// feat 1, thresh 1 (0.0f)
BOOST_CHECK_EQUAL(1, static_cast<int>(counters(1, 1, 0, 0)));
BOOST_CHECK_EQUAL(1, static_cast<int>(counters(1, 1, 0, 1)));
BOOST_CHECK_EQUAL(2, static_cast<int>(counters(1, 1, 1, 0)));
BOOST_CHECK_EQUAL(0, static_cast<int>(counters(1, 1, 1, 1)));
// feat 2, thresh 0 (0.0f)
BOOST_CHECK_EQUAL(0, static_cast<int>(counters(2, 0, 0, 0)));
BOOST_CHECK_EQUAL(2, static_cast<int>(counters(2, 0, 0, 1)));
BOOST_CHECK_EQUAL(0, static_cast<int>(counters(2, 0, 1, 0)));
BOOST_CHECK_EQUAL(2, static_cast<int>(counters(2, 0, 1, 1)));
// feat 2, thresh 1 (+500.0f)
BOOST_CHECK_EQUAL(1, static_cast<int>(counters(2, 1, 0, 0)));
BOOST_CHECK_EQUAL(1, static_cast<int>(counters(2, 1, 0, 1)));
BOOST_CHECK_EQUAL(1, static_cast<int>(counters(2, 1, 1, 0)));
BOOST_CHECK_EQUAL(1, static_cast<int>(counters(2, 1, 1, 1)));
}
BOOST_AUTO_TEST_CASE(testColorFeatureManySamples) {
const int NUM_FEAT = 1000;
const int NUM_THRESH = 50;
unsigned int samplesPerImage = 100;
unsigned int minSampleCount = 32;
int maxDepth = 15;
uint16_t boxRadius = 127;
uint16_t regionSize = 50;
static const int NUM_THREADS = 1;
static const int maxImages = 5;
static const int imageCacheSize = 5;
unsigned int maxSamplesPerBatch = 100000;
AccelerationMode accelerationMode = GPU_AND_CPU_COMPARE;
TrainingConfiguration configuration(SEED, samplesPerImage, NUM_FEAT, minSampleCount, maxDepth, boxRadius,
regionSize, NUM_THRESH, NUM_THREADS, maxImages, imageCacheSize, maxSamplesPerBatch, accelerationMode);
ImageFeatureEvaluation featureFunction(0, configuration);
std::vector<PixelInstance> samples;
const int width = 640;
const int height = 480;
std::vector<RGBDImage> images(10, RGBDImage(width, height));
for (size_t image = 0; image < images.size(); image++) {
for (int y = 0; y < height; y++) {
for (int x = 0; x < width; x++) {
for (int c = 0; c < 3; c++) {
float v = 10000 * image + 100 * c + y * width + x;
images[image].setColor(x, y, c, v);
}
}
}
images[image].calculateIntegral();
}
const size_t NUM_LABELS = 10;
const int NUM_SAMPLES = samplesPerImage * images.size();
for (int i = 0; i < NUM_SAMPLES; i++) {
PixelInstance sample(
&images.at(i / (NUM_SAMPLES / images.size())), // image
i / 100, // label
Depth((i % 20) / 10.0 + 1.0), // depth
i % width, // x
i % height // y
);
samples.push_back(sample);
}
RandomTree<PixelInstance, ImageFeatureFunction> node(0, 0, getPointers(samples), NUM_LABELS);
cuv::ndarray<WeightType, cuv::dev_memory_space> histogram(node.getHistogram());
{
std::vector<std::vector<const PixelInstance*> > batches = featureFunction.prepare(getPointers(samples),
node, cuv::dev_memory_space(), false);
ImageFeaturesAndThresholds<cuv::dev_memory_space> featuresAndThresholds =
featureFunction.generateRandomFeatures(batches[0], configuration.getRandomSeed(),
true, cuv::dev_memory_space());
cuv::ndarray<WeightType, cuv::dev_memory_space> counters =
featureFunction.calculateFeatureResponsesAndHistograms(
node, batches, featuresAndThresholds);
cuv::ndarray<ScoreType, cuv::host_memory_space> scores(
featureFunction.calculateScores(counters,
featuresAndThresholds, histogram));
size_t scoreHash = checkScores(scores, NUM_FEAT, NUM_THRESH);
size_t counterHash = checkCounters(configuration, counters, samples);
// magic numbers; used to check for regressions
BOOST_CHECK_EQUAL(4437303196209240250lu, counterHash);
BOOST_CHECK_EQUAL(13702092111133522162lu, scoreHash);
}
}
static void checkNode(boost::shared_ptr<const RandomTree<PixelInstance, ImageFeatureFunction> > node,
const boost::shared_ptr<const TreeNodes>& treeData,
const SplitFunction<PixelInstance, ImageFeatureFunction>* split = 0) {
const size_t numLabels = node->getHistogram().size();
const size_t nodeNr = node->getNodeId();
assert(nodeNr - node->getTreeId() < treeData->numNodes());
TreeNodeData data = getTreeNode(nodeNr, treeData);
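// leaf nodes are stored with leftNodeOffset == -1, a NaN threshold and only their normalized
// histogram; split nodes additionally carry the threshold and the full feature description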
if (node->isLeaf()) {
BOOST_CHECK_EQUAL(-1, static_cast<int>(data.leftNodeOffset));
BOOST_CHECK(isnan(static_cast<float>(data.threshold)));
for (size_t label = 0; label < numLabels; label++) {
BOOST_CHECK_EQUAL(static_cast<float>(node->getNormalizedHistogram()[label]),
static_cast<float>(data.histogram(label)));
}
} else {
BOOST_REQUIRE(split);
const ImageFeatureFunction& feature = split->getFeature();
const float expectedThreshold = split->getThreshold();
const int expectedLeftNodeOffset = node->getLeft()->getNodeId() - node->getNodeId();
BOOST_CHECK_EQUAL(expectedLeftNodeOffset, static_cast<int>(data.leftNodeOffset));
BOOST_CHECK_EQUAL(expectedThreshold, static_cast<float>(data.threshold));
BOOST_CHECK_EQUAL(static_cast<int>(feature.getType()), static_cast<int>(data.type));
BOOST_CHECK_EQUAL(feature.getOffset1().getX(), static_cast<int>(data.offset1X));
BOOST_CHECK_EQUAL(feature.getOffset1().getY(), static_cast<int>(data.offset1Y));
BOOST_CHECK_EQUAL(feature.getRegion1().getX(), static_cast<int>(data.region1X));
BOOST_CHECK_EQUAL(feature.getRegion1().getY(), static_cast<int>(data.region1Y));
BOOST_CHECK_EQUAL(feature.getOffset2().getX(), static_cast<int>(data.offset2X));
BOOST_CHECK_EQUAL(feature.getOffset2().getY(), static_cast<int>(data.offset2Y));
BOOST_CHECK_EQUAL(feature.getRegion2().getX(), static_cast<int>(data.region2X));
BOOST_CHECK_EQUAL(feature.getRegion2().getY(), static_cast<int>(data.region2Y));
BOOST_CHECK_EQUAL(feature.getChannel1(), static_cast<uint8_t>(data.channel1));
BOOST_CHECK_EQUAL(feature.getChannel2(), static_cast<uint8_t>(data.channel2));
}
}
BOOST_AUTO_TEST_CASE(testRecallOnGPU) {
const int NUM_FEAT = 1000;
const int NUM_THRESH = 50;
unsigned int samplesPerImage = 100;
unsigned int minSampleCount = 32;
int maxDepth = 15;
uint16_t boxRadius = 127;
uint16_t regionSize = 50;
static const int NUM_THREADS = 1;
static const int maxImages = 5;
static const int imageCacheSize = 5;
unsigned int maxSamplesPerBatch = 100000;
AccelerationMode accelerationMode = GPU_AND_CPU_COMPARE;
TrainingConfiguration configuration(SEED, samplesPerImage, NUM_FEAT, minSampleCount, maxDepth, boxRadius,
regionSize, NUM_THRESH, NUM_THREADS, maxImages, imageCacheSize, maxSamplesPerBatch, accelerationMode);
ImageFeatureEvaluation featureFunction(0, configuration);
std::vector<PixelInstance> samples;
const int width = 640;
const int height = 480;
std::vector<RGBDImage> images(10, RGBDImage(width, height));
for (size_t image = 0; image < images.size(); image++) {
for (int y = 0; y < height; y++) {
for (int x = 0; x < width; x++) {
for (int c = 0; c < 3; c++) {
float v = 10000 * image + 100 * c + y * width + x;
images[image].setColor(x, y, c, v);
}
}
}
images[image].calculateIntegral();
}
const size_t NUM_LABELS = 3;
const int NUM_SAMPLES = samplesPerImage * images.size();
for (int i = 0; i < NUM_SAMPLES; i++) {
PixelInstance sample(
&images.at(i / (NUM_SAMPLES / images.size())), // image
(i / 100) % NUM_LABELS, // label
Depth((i % 20) / 10.0 + 0.1), // depth
i % width, // x
i % height // y
);
samples.push_back(sample);
}
/**
* n0
* / \
* / \
* n1 n2
* / \
* / \
* n3 n4
*/
boost::shared_ptr<RandomTree<PixelInstance, ImageFeatureFunction> > n0 =
boost::make_shared<RandomTree<PixelInstance, ImageFeatureFunction> >(0, 0, getPointers(samples),
NUM_LABELS);
std::vector<WeightType> histN1(NUM_LABELS, 0);
histN1[0] = 10;
histN1[1] = 10;
histN1[2] = 10;
boost::shared_ptr<RandomTree<PixelInstance, ImageFeatureFunction> > n1 =
boost::make_shared<RandomTree<PixelInstance, ImageFeatureFunction> >(1, 1, n0, histN1);
std::vector<WeightType> histN2(NUM_LABELS, 0);
histN2[0] = 60;
histN2[1] = 60;
histN2[2] = 20;
boost::shared_ptr<RandomTree<PixelInstance, ImageFeatureFunction> > n2 =
boost::make_shared<RandomTree<PixelInstance, ImageFeatureFunction> >(2, 1, n0, histN2);
std::vector<WeightType> histN3(NUM_LABELS, 0);
histN3[0] = 10;
histN3[2] = 20;
boost::shared_ptr<RandomTree<PixelInstance, ImageFeatureFunction> > n3 =
boost::make_shared<RandomTree<PixelInstance, ImageFeatureFunction> >(3, 2, n1, histN3);
std::vector<WeightType> histN4(NUM_LABELS, 0);
histN4[1] = 50;
histN4[2] = 20;
boost::shared_ptr<RandomTree<PixelInstance, ImageFeatureFunction> > n4 =
boost::make_shared<RandomTree<PixelInstance, ImageFeatureFunction> >(4, 2, n1, histN4);
size_t featureId1 = 1;
float threshold1 = 28.391;
ScoreType score1 = 0.392;
ImageFeatureFunction feature1(COLOR,
Offset(-10, 5), Region(7, 3), 1,
Offset(27, -19), Region(65, 73), 2);
SplitFunction<PixelInstance, ImageFeatureFunction> split1(featureId1, feature1, threshold1, score1);
n1->addChildren(split1, n3, n4);
size_t featureId2 = 2;
float threshold2 = -29.1245;
ScoreType score2 = 0.9371;
ImageFeatureFunction feature2(DEPTH,
Offset(-18, 25), Region(4, 19), 0,
Offset(9, 28), Region(1, 16), 0);
SplitFunction<PixelInstance, ImageFeatureFunction> split2(featureId2, feature2, threshold2, score2);
n0->addChildren(split2, n1, n2);
BOOST_CHECK(n0->isRoot());
BOOST_CHECK_EQUAL(5, n0->countNodes());
cuv::ndarray<WeightType, cuv::host_memory_space> classLabelPriorDistribution(NUM_LABELS);
for (size_t i = 0; i < NUM_LABELS; i++) {
classLabelPriorDistribution[i] = 100;
}
boost::shared_ptr<RandomTreeImage> randomTreeImage = boost::make_shared<RandomTreeImage>(n0, configuration,
classLabelPriorDistribution);
randomTreeImage->normalizeHistograms(0.0);
boost::shared_ptr<const TreeNodes> treeData = convertTree(randomTreeImage);
BOOST_CHECK_EQUAL(n0->countNodes(), static_cast<size_t>(treeData->numNodes()));
BOOST_CHECK_EQUAL(n0->getNumClasses(), static_cast<size_t>(treeData->numLabels()));
checkNode(n0, treeData, &split2);
checkNode(n1, treeData, &split1);
BOOST_REQUIRE(n2->isLeaf());
BOOST_REQUIRE(n3->isLeaf());
BOOST_REQUIRE(n4->isLeaf());
checkNode(n2, treeData);
checkNode(n3, treeData);
checkNode(n4, treeData);
// do classify
const size_t treeCacheSize = 3;
{
RGBDImage image(640, 480);
image.calculateIntegral();
{
utils::Profile classifyImageTimer("classifyImage");
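// the output buffer holds one plane of per-pixel values per label: extents are [label][row][column]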
cuv::ndarray<float, cuv::dev_memory_space> output(
cuv::extents[NUM_LABELS][image.getHeight()][image.getWidth()]);
cudaSafeCall(hipMemset(output.ptr(), 0, static_cast<size_t>(output.size() * sizeof(float))));
classifyImage(treeCacheSize, output, image, NUM_LABELS, treeData);
}
}
}
BOOST_AUTO_TEST_CASE(testRecallLargeForest) {
unsigned int samplesPerImage = 100;
const int NUM_FEAT = 1000;
const int NUM_THRESH = 50;
unsigned int minSampleCount = 32;
int maxDepth = 15;
uint16_t boxRadius = 127;
uint16_t regionSize = 50;
static const int NUM_THREADS = 1;
static const int maxImages = 5;
static const int imageCacheSize = 5;
unsigned int maxSamplesPerBatch = 100000;
AccelerationMode accelerationMode = GPU_AND_CPU_COMPARE;
TrainingConfiguration configuration(SEED, samplesPerImage, NUM_FEAT, minSampleCount, maxDepth, boxRadius,
regionSize, NUM_THRESH, NUM_THREADS, maxImages, imageCacheSize, maxSamplesPerBatch, accelerationMode);
std::vector<PixelInstance> samples;
const size_t NUM_LABELS = 3;
const int width = 640;
const int height = 480;
std::vector<RGBDImage> images(10, RGBDImage(width, height));
const int NUM_SAMPLES = samplesPerImage * images.size();
for (int i = 0; i < NUM_SAMPLES; i++) {
PixelInstance sample(
&images.at(i / (NUM_SAMPLES / images.size())), // image
(i / 100) % NUM_LABELS, // label
Depth((i % 20) / 10.0 + 0.1), // depth
i % width, // x
i % height // y
);
samples.push_back(sample);
}
// explicitly test a tree that exceeds the maximal number of nodes per layer
const size_t numNodes[] = { 10, 100, 3 * NODES_PER_TREE_LAYER + 2 };
Sampler sampler(4711, 0, 1000);
Sampler typeSampler(4711, 0, 1);
Sampler channelSampler(4711, 0, 5);
Sampler offsetSampler(4711, -120, 120);
Sampler regionSampler(4711, 0, 20);
for (size_t treeId = 0; treeId < 3; treeId++) {
boost::shared_ptr<RandomTree<PixelInstance, ImageFeatureFunction> > rootNode =
boost::make_shared<RandomTree<PixelInstance, ImageFeatureFunction> >(treeId, 0, getPointers(samples),
NUM_LABELS);
std::vector<boost::shared_ptr<RandomTree<PixelInstance, ImageFeatureFunction> > > nodes;
std::map<boost::shared_ptr<RandomTree<PixelInstance, ImageFeatureFunction> >,
SplitFunction<PixelInstance, ImageFeatureFunction> > splits;
nodes.push_back(rootNode);
boost::shared_ptr<RandomTree<PixelInstance, ImageFeatureFunction> > previousNode = rootNode;
/**
* creates a degenerate tree with N nodes
*
* n0
* / \
* / \
* n1 n2
* / \
* / \
* n3 n4
* /
* /
* n5
* / \
* / \
* n6 n7
* /
* /
* ...
*/
for (size_t nodeId = 1; nodeId < numNodes[treeId]; nodeId += 2) {
const size_t level = (nodeId + 1) / 2;
assert(level > 0 && level < numNodes[treeId]);
boost::shared_ptr<RandomTree<PixelInstance, ImageFeatureFunction> > leftNode =
boost::make_shared<RandomTree<PixelInstance, ImageFeatureFunction> >(nodeId + treeId, level,
getPointers(samples), NUM_LABELS, previousNode);
boost::shared_ptr<RandomTree<PixelInstance, ImageFeatureFunction> > rightNode =
boost::make_shared<RandomTree<PixelInstance, ImageFeatureFunction> >(nodeId + 1 + treeId, level,
getPointers(samples), NUM_LABELS, previousNode);
size_t featureId = sampler.getNext();
float threshold = sampler.getNext() / 200.0 - 100.0;
ScoreType score = sampler.getNext() / 1000.0;
assertProbability(score);
ImageFeatureFunction feature(static_cast<FeatureType>(typeSampler.getNext()),
Offset(offsetSampler.getNext(), offsetSampler.getNext()),
Region(regionSampler.getNext(), regionSampler.getNext()),
channelSampler.getNext(),
Offset(offsetSampler.getNext(), offsetSampler.getNext()),
Region(regionSampler.getNext(), regionSampler.getNext()),
channelSampler.getNext());
SplitFunction<PixelInstance, ImageFeatureFunction> split(featureId, feature, threshold, score);
previousNode->addChildren(split, leftNode, rightNode);
splits[previousNode] = split;
nodes.push_back(leftNode);
nodes.push_back(rightNode);
previousNode = leftNode;
}
BOOST_CHECK_EQUAL(treeId, rootNode->getTreeId());
BOOST_CHECK(rootNode->isRoot());
BOOST_CHECK_EQUAL(numNodes[treeId] + 1, rootNode->countNodes());
cuv::ndarray<WeightType, cuv::host_memory_space> classLabelPriorDistribution(NUM_LABELS);
for (size_t i = 0; i < NUM_LABELS; i++) {
classLabelPriorDistribution[i] = 100;
}
boost::shared_ptr<RandomTreeImage> randomTreeImage =
boost::make_shared<RandomTreeImage>(rootNode, configuration, classLabelPriorDistribution);
randomTreeImage->normalizeHistograms(0.0);
boost::shared_ptr<const TreeNodes> treeData = convertTree(randomTreeImage);
BOOST_CHECK_EQUAL(rootNode->countNodes(), static_cast<size_t>(treeData->numNodes()));
BOOST_CHECK_EQUAL(rootNode->getNumClasses(), static_cast<size_t>(treeData->numLabels()));
CURFIL_INFO("checking nodes");
assert(nodes.size() == numNodes[treeId] + 1);
for (size_t nodeId = 0; nodeId < numNodes[treeId]; nodeId++) {
const boost::shared_ptr<RandomTree<PixelInstance, ImageFeatureFunction> > node = nodes[nodeId];
if (node->isLeaf()) {
checkNode(node, treeData);
} else {
checkNode(node, treeData, &splits[node]);
}
}
CURFIL_INFO("checked " << numNodes[treeId] << " nodes of tree " << treeId);
}
}
BOOST_AUTO_TEST_SUITE_END()
| f9de5aa26da63f9c4509e4231e1a7c62f9cde77b.cu | #define BOOST_TEST_MODULE example
#include <assert.h>
#include <boost/functional/hash.hpp>
#include <boost/make_shared.hpp>
#include <boost/shared_ptr.hpp>
#include <boost/test/included/unit_test.hpp>
#include <cuv/ndarray.hpp>
#include "random_tree_image_gpu.h"
#include "random_tree_image.h"
#include "score.h"
#include "test_common.h"
#include "utils.h"
using namespace curfil;
#define DUMP_IMAGE 0
static const int SEED = 4711;
class Fixture {
public:
Fixture() {
clearImageCache();
}
};
BOOST_FIXTURE_TEST_SUITE(RandomTreeImageGPUTest, Fixture)
template<class W>
__global__
static void calculcateScoreKernel(ScoreType* result, const size_t numClasses,
const W* leftClasses, const W* rightClasses, const unsigned int leftRightStride,
const W* allClasses, const ScoreType totalLeft, const ScoreType totalRight) {
ScoreType score = NormalizedInformationGainScore::calculateScore(numClasses, leftClasses, rightClasses,
leftRightStride,
allClasses, totalLeft, totalRight);
*result = score;
}
static ScoreType scoreOnGPU(const size_t numClasses, const cuv::ndarray<int, cuv::host_memory_space>& leftClasses,
const cuv::ndarray<int, cuv::host_memory_space>& rightClasses,
const cuv::ndarray<int, cuv::host_memory_space>& allClasses,
const ScoreType totalLeft, const ScoreType totalRight) {
cuv::ndarray<ScoreType, cuv::dev_memory_space> result(1);
const cuv::ndarray<int, cuv::dev_memory_space> leftClassesDevice(leftClasses);
const cuv::ndarray<int, cuv::dev_memory_space> rightClassesDevice(rightClasses);
const cuv::ndarray<int, cuv::dev_memory_space> allClassesDevice(allClasses);
const unsigned int leftRightStride = leftClassesDevice.stride(0);
BOOST_REQUIRE_EQUAL(leftRightStride, rightClassesDevice.stride(0));
calculcateScoreKernel<<<1,1>>>(result.ptr(), numClasses, leftClassesDevice.ptr(), rightClassesDevice.ptr(),
leftRightStride, allClassesDevice.ptr(), totalLeft, totalRight);
cudaSafeCall(cudaThreadSynchronize());
double res = result[0];
return res;
}
static ScoreType scoreOnGPU(const size_t size, const WeightType* leftClass, const WeightType* rightClass,
const WeightType* allClasses, const ScoreType totalLeft, const ScoreType totalRight) {
cuv::ndarray<int, cuv::host_memory_space> leftClassArray(size);
cuv::ndarray<int, cuv::host_memory_space> rightClassArray(size);
cuv::ndarray<int, cuv::host_memory_space> allClassesArray(size);
for (size_t i = 0; i < size; i++) {
leftClassArray[i] = leftClass[i];
rightClassArray[i] = rightClass[i];
allClassesArray[i] = allClasses[i];
}
return scoreOnGPU(size, leftClassArray, rightClassArray, allClassesArray, totalLeft, totalRight);
}
BOOST_AUTO_TEST_CASE(testInformationGainScore) {
const int numClasses = 2;
cuv::ndarray<int, cuv::host_memory_space> left(numClasses);
cuv::ndarray<int, cuv::host_memory_space> right(numClasses);
cuv::ndarray<int, cuv::host_memory_space> allClass(numClasses);
for (size_t num = 1; num < 10; num++) {
// worst case scenario: identical left/right distributions, score=0
left[0] = num;
right[0] = num;
left[1] = num;
right[1] = num;
allClass[0] = 2 * num;
allClass[1] = 2 * num;
ScoreType totalLeft = 2 * num;
ScoreType totalRight = 2 * num;
BOOST_REQUIRE_EQUAL(left.stride(0), 1);
ScoreType score = NormalizedInformationGainScore::calculateScore(numClasses, left.ptr(), right.ptr(),
left.stride(0), allClass.ptr(), totalLeft, totalRight);
BOOST_CHECK_CLOSE(0, score, 0);
BOOST_CHECK_CLOSE(score, scoreOnGPU(numClasses, left, right, allClass, totalLeft, totalRight), 1e-6);
// best case scenario: score=1
left[0] = 0;
right[0] = 2 * num;
left[1] = 2 * num;
right[1] = 0;
allClass[0] = 2 * num;
allClass[1] = 2 * num;
totalLeft = 2 * num;
totalRight = 2 * num;
BOOST_REQUIRE_EQUAL(left.stride(0), 1);
score = NormalizedInformationGainScore::calculateScore(numClasses, left.ptr(), right.ptr(),
left.stride(0), allClass.ptr(), totalLeft, totalRight);
BOOST_CHECK_CLOSE(1, score, 0);
BOOST_CHECK_CLOSE(score, scoreOnGPU(numClasses, left, right, allClass, totalLeft, totalRight), 1e-6);
}
left[0] = 5;
right[0] = 3;
left[1] = 8;
right[1] = 1;
allClass[0] = 8;
allClass[1] = 9;
double totalLeft = left[0] + left[1];
double totalRight = right[0] + right[1];
BOOST_REQUIRE_EQUAL(left.stride(0), 1);
ScoreType score1 = NormalizedInformationGainScore::calculateScore(numClasses, left.ptr(), right.ptr(),
left.stride(0), allClass.ptr(), totalLeft, totalRight);
BOOST_CHECK_CLOSE(0.080185, score1, 1e-4);
BOOST_CHECK_CLOSE(score1, scoreOnGPU(numClasses, left, right, allClass, totalLeft, totalRight), 1e-5);
left[0] = 2;
right[0] = 6;
left[1] = 8;
right[1] = 1;
totalLeft = left[0] + left[1];
totalRight = right[0] + right[1];
BOOST_REQUIRE_EQUAL(left.stride(0), 1);
ScoreType score2 = NormalizedInformationGainScore::calculateScore(numClasses, left.ptr(), right.ptr(),
left.stride(0), allClass.ptr(), totalLeft, totalRight);
BOOST_CHECK_GT(score2, score1);
BOOST_CHECK_CLOSE(0.33339, score2, 1e-3);
BOOST_CHECK_CLOSE(score2, scoreOnGPU(numClasses, left, right, allClass, totalLeft, totalRight), 1e-6);
// case 1 (a real case)
// histogram: [ 86 241 291 3 267 ]
// histogram left: [ 56 241 290 3 18 ]
// histogram right: [ 30 0 1 0 249 ]
{
const size_t size = 5;
const WeightType all[] = { 86, 241, 291, 3, 267 };
const WeightType left[] = { 56, 241, 290, 3, 18 };
const WeightType right[] = { 30, 0, 1, 0, 249 };
const unsigned int leftRightStride = 1;
const size_t totalLeft = std::accumulate(left, left + size, 0);
const size_t totalRight = std::accumulate(right, right + size, 0);
BOOST_REQUIRE_EQUAL(totalLeft + totalRight, std::accumulate(all, all + size, 0));
ScoreType score = NormalizedInformationGainScore::calculateScore(size, left, right,
leftRightStride, all, totalLeft, totalRight);
BOOST_CHECK_CLOSE(score, 0.491311, 1e-3);
BOOST_CHECK_CLOSE(score, scoreOnGPU(size, left, right, all, totalLeft, totalRight), 1e-6);
score = InformationGainScore::calculateScore(size, left, right,
leftRightStride, all, totalLeft, totalRight);
BOOST_CHECK_CLOSE(score, 0.690912, 1e-3);
}
{
// case 2 (constructed, obviously)
// histogram: [ 50 100 50 0 100 ]
// histogram left: [ 50 100 0 0 0 ]
// histogram right: [ 0 0 50 0 100 ]
const size_t size = 5;
const WeightType all[] = { 50, 100, 50, 0, 100 };
const WeightType left[] = { 50, 100, 0, 0, 0 };
const WeightType right[] = { 0, 0, 50, 0, 100 };
const unsigned int leftRightStride = 1;
const size_t totalLeft = std::accumulate(left, left + size, 0);
const size_t totalRight = std::accumulate(right, right + size, 0);
ScoreType score = NormalizedInformationGainScore::calculateScore(size, left, right, leftRightStride,
all, totalLeft, totalRight);
BOOST_CHECK_CLOSE(score, 0.68533, 1e-3);
score = InformationGainScore::calculateScore(size, left, right, leftRightStride,
all, totalLeft, totalRight);
BOOST_CHECK_CLOSE(score, 1.0, 1e-3);
}
}
template<class T>
static void updateHash(const boost::hash<size_t>& hasher, size_t& hash, const T& value) {
// extracted from boost's hash_combine implementation
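// 0x9e3779b9 is the 32-bit golden-ratio constant boost uses to spread the bits of each value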
hash ^= hasher(value) + 0x9e3779b9 + (hash << 6) + (hash >> 2);
}
template<class T>
static size_t checkScores(const cuv::ndarray<ScoreType, cuv::host_memory_space>& scores, T numFeatures,
T numThresholds) {
BOOST_CHECK_EQUAL(2, static_cast<int>(scores.ndim()));
BOOST_CHECK_EQUAL(static_cast<size_t>(numThresholds), static_cast<size_t>(scores.shape(0)));
BOOST_CHECK_EQUAL(static_cast<size_t>(numFeatures), static_cast<size_t>(scores.shape(1)));
size_t hash = 0;
boost::hash<size_t> hasher;
for (T feat = 0; feat < numFeatures; feat++) {
for (T thresh = 0; thresh < numThresholds; thresh++) {
const ScoreType score = scores(thresh, feat);
BOOST_CHECK_GE(score, 0.0);
BOOST_CHECK_LE(score, 1.0);
updateHash(hasher, hash, score);
}
}
return hash;
}
static size_t checkCounters(TrainingConfiguration& configuration,
const cuv::ndarray<WeightType, cuv::dev_memory_space> countersDevice,
const std::vector<PixelInstance>& samples) {
const cuv::ndarray<WeightType, cuv::host_memory_space> counters(countersDevice);
size_t hash = 0;
boost::hash<size_t> hasher;
std::map<size_t, size_t> samplesPerLabel;
for (size_t sample = 0; sample < samples.size(); sample++) {
samplesPerLabel[samples[sample].getLabel()]++;
}
size_t numLabels = samplesPerLabel.size();
assert(numLabels > 0);
const size_t features = configuration.getFeatureCount();
const size_t thresholds = configuration.getThresholds();
BOOST_CHECK_EQUAL(4, static_cast<int>(counters.ndim()));
BOOST_CHECK_EQUAL(features, static_cast<size_t>(counters.shape(0)));
BOOST_CHECK_EQUAL(thresholds, static_cast<size_t>(counters.shape(1)));
BOOST_CHECK_EQUAL(numLabels, static_cast<size_t>(counters.shape(2)));
BOOST_CHECK_EQUAL(2lu, static_cast<size_t>(counters.shape()[3]));
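// counters layout: [feature][threshold][label][branch] with branch 0 = samples sent left and
// branch 1 = samples sent right; for every (feature, threshold) pair they sum to the label's total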
for (size_t label = 0; label < numLabels; label++) {
for (size_t thresh = 0; thresh < thresholds; thresh++) {
for (size_t feat = 0; feat < features; feat++) {
const size_t left = counters(feat, thresh, label, 0);
const size_t right = counters(feat, thresh, label, 1);
const size_t numSamples = samplesPerLabel[label];
BOOST_CHECK_EQUAL(numSamples, left + right);
updateHash(hasher, hash, left);
updateHash(hasher, hash, right);
}
}
}
return hash;
}
BOOST_AUTO_TEST_CASE(testDepthFeatureSimple) {
const int NUM_FEAT = 1;
const int NUM_THRESH = 100;
unsigned int samplesPerImage = 500;
unsigned int minSampleCount = 32;
int maxDepth = 15;
uint16_t boxRadius = 127;
uint16_t regionSize = 16;
static const int NUM_THREADS = 1;
static const int maxImages = 0;
static const int imageCacheSize = 1;
unsigned int maxSamplesPerBatch = 5000;
AccelerationMode accelerationMode = GPU_AND_CPU_COMPARE;
TrainingConfiguration configuration(SEED, samplesPerImage, NUM_FEAT, minSampleCount, maxDepth, boxRadius,
regionSize, NUM_THRESH, NUM_THREADS, maxImages, imageCacheSize, maxSamplesPerBatch, accelerationMode);
ImageFeatureEvaluation featureFunction(0, configuration);
static const int width = 16;
static const int height = 20;
std::vector<RGBDImage> images(1, RGBDImage(width, height));
std::vector<PixelInstance> samples;
ImageFeaturesAndThresholds<cuv::dev_memory_space> featuresAndThresholds(NUM_FEAT, NUM_THRESH,
boost::make_shared<cuv::default_allocator>());
for (int i = 0; i < NUM_THRESH; i++) {
featuresAndThresholds.thresholds()(i, 0) = (i - 50) / 10.0f;
}
featuresAndThresholds.types()[0] = DEPTH;
featuresAndThresholds.offset1X()[0] = 1;
featuresAndThresholds.offset1Y()[0] = 1;
featuresAndThresholds.region1X()[0] = 2;
featuresAndThresholds.region1Y()[0] = 2;
featuresAndThresholds.offset2X()[0] = -3;
featuresAndThresholds.offset2Y()[0] = -1;
featuresAndThresholds.region2X()[0] = 1;
featuresAndThresholds.region2Y()[0] = 1;
featuresAndThresholds.channel1()[0] = 0;
featuresAndThresholds.channel2()[0] = 0;
const int NUM_LABELS = 2;
samples.push_back(PixelInstance(&images[0], 0, Depth(1.0), 6, 3));
samples.push_back(PixelInstance(&images[0], 1, Depth(1.0), 6, 3));
RandomTree<PixelInstance, ImageFeatureFunction> node(0, 0, getPointers(samples), NUM_LABELS);
cuv::ndarray<WeightType, cuv::dev_memory_space> histogram(node.getHistogram());
{
std::vector<std::vector<const PixelInstance*> > batches = featureFunction.prepare(getPointers(samples), node,
cuv::dev_memory_space(), false);
cuv::ndarray<FeatureResponseType, cuv::host_memory_space> featureResponses;
cuv::ndarray<WeightType, cuv::dev_memory_space> counters =
featureFunction.calculateFeatureResponsesAndHistograms(node, batches, featuresAndThresholds,
&featureResponses);
BOOST_CHECK(isnan(static_cast<FeatureResponseType>(featureResponses(0, 0))));
cuv::ndarray<ScoreType, cuv::host_memory_space> scores(featureFunction.calculateScores(counters,
featuresAndThresholds, histogram));
checkScores(scores, NUM_FEAT, NUM_THRESH);
}
images[0].reset();
clearImageCache();
images[0].setDepth(7, 3, Depth(1.5f));
images[0].setDepth(7, 4, Depth(1.7f));
images[0].setDepth(8, 4, Depth(4.5f));
images[0].setDepth(3, 2, Depth(3.9f));
#if DUMP_IMAGE
images[0].dumpDepth(std::cout);
#endif
images[0].calculateIntegral();
#if DUMP_IMAGE
CURFIL_INFO("integral:");
images[0].dumpDepth(std::cout);
#endif
{
std::vector<std::vector<const PixelInstance*> > batches = featureFunction.prepare(getPointers(samples),
node, cuv::dev_memory_space(), false);
cuv::ndarray<FeatureResponseType, cuv::host_memory_space> featureResponses;
cuv::ndarray<WeightType, cuv::dev_memory_space> counters =
featureFunction.calculateFeatureResponsesAndHistograms(node, batches, featuresAndThresholds,
&featureResponses);
cuv::ndarray<ScoreType, cuv::host_memory_space> scores(featureFunction.calculateScores(counters,
featuresAndThresholds, histogram));
checkCounters(configuration, counters, samples);
checkScores(scores, NUM_FEAT, NUM_THRESH);
BOOST_CHECK_CLOSE((1.5 + 1.7 + 4.5) / 3 - 3.9, static_cast<FeatureResponseType>(featureResponses(0, 0)), 1e-6);
for (int label = 0; label < NUM_LABELS; label++) {
for (int thresh = 0; thresh < NUM_THRESH; thresh++) {
for (int feat = 0; feat < NUM_FEAT; feat++) {
BOOST_CHECK_EQUAL(1,
static_cast<int>(counters(feat, thresh, label, 0) + counters(feat, thresh, label, 1)));
BOOST_CHECK_EQUAL(static_cast<unsigned int>(thresh >= 37),
static_cast<unsigned int>(counters(feat, thresh, label, 0)));
}
}
}
images[0].reset();
clearImageCache();
for (int y = 0; y < height; y++) {
for (int x = 0; x < width; x++) {
images[0].setDepth(x, y, Depth(1.0f));
}
}
images[0].calculateIntegral();
#if DUMP_IMAGE
images[0].dumpDepth(std::cout);
#endif
{
std::vector<std::vector<const PixelInstance*> > batches = featureFunction.prepare(getPointers(samples),
node, cuv::dev_memory_space(), false);
cuv::ndarray<FeatureResponseType, cuv::host_memory_space> featureResponses;
cuv::ndarray<WeightType, cuv::dev_memory_space> counters =
featureFunction.calculateFeatureResponsesAndHistograms(node, batches, featuresAndThresholds,
&featureResponses);
cuv::ndarray<ScoreType, cuv::host_memory_space> scores(featureFunction.calculateScores(counters,
featuresAndThresholds, histogram));
BOOST_CHECK_CLOSE(0, static_cast<FeatureResponseType>(featureResponses(0, 0)), 0);
checkScores(scores, NUM_FEAT, NUM_THRESH);
checkCounters(configuration, counters, samples);
for (int label = 0; label < NUM_LABELS; label++) {
for (int thresh = 0; thresh < 100; thresh++) {
for (int feat = 0; feat < NUM_FEAT; feat++) {
BOOST_CHECK_EQUAL(1,
static_cast<int>(counters(feat, thresh, label, 0) + counters(feat, thresh, label, 1)));
BOOST_CHECK_EQUAL(static_cast<unsigned int>(thresh >= 50),
static_cast<unsigned int>(counters(feat, thresh, label, 0)));
}
}
}
}
}
}
BOOST_AUTO_TEST_CASE(testColorFeatureSimple) {
unsigned int samplesPerImage = 500;
unsigned int NUM_FEAT = 1;
unsigned int minSampleCount = 32;
int maxDepth = 15;
uint16_t boxRadius = 127;
uint16_t regionSize = 16;
static const uint16_t NUM_THRESH = 3;
static const int NUM_THREADS = 1;
static const int maxImages = 0;
static const int imageCacheSize = 1;
unsigned int maxSamplesPerBatch = 5000;
AccelerationMode accelerationMode = GPU_AND_CPU_COMPARE;
TrainingConfiguration configuration(SEED, samplesPerImage, NUM_FEAT, minSampleCount, maxDepth, boxRadius,
regionSize, NUM_THRESH, NUM_THREADS, maxImages, imageCacheSize, maxSamplesPerBatch, accelerationMode);
ImageFeatureEvaluation featureFunction(0, configuration);
static const int width = 16;
static const int height = 20;
std::vector<RGBDImage> images(1, RGBDImage(width, height));
std::vector<PixelInstance> samples;
ImageFeaturesAndThresholds<cuv::dev_memory_space> featuresAndThresholds(NUM_FEAT, NUM_THRESH,
boost::make_shared<cuv::default_allocator>());
featuresAndThresholds.types()[0] = COLOR;
featuresAndThresholds.offset1X()[0] = 3;
featuresAndThresholds.offset1Y()[0] = -2;
featuresAndThresholds.region1X()[0] = 4;
featuresAndThresholds.region1Y()[0] = 2;
featuresAndThresholds.offset2X()[0] = -3;
featuresAndThresholds.offset2Y()[0] = -2;
featuresAndThresholds.region2X()[0] = 1;
featuresAndThresholds.region2Y()[0] = 2;
featuresAndThresholds.channel1()[0] = 0;
featuresAndThresholds.channel2()[0] = 0;
featuresAndThresholds.thresholds()(0, 0) = -1.0f;
featuresAndThresholds.thresholds()(1, 0) = 0.0f;
featuresAndThresholds.thresholds()(2, 0) = 1.0f;
const int NUM_LABELS = 2;
samples.push_back(PixelInstance(&images[0], 0, Depth(1.0), 6, 4));
samples.push_back(PixelInstance(&images[0], 1, Depth(1.0), 6, 4));
RandomTree<PixelInstance, ImageFeatureFunction> node(0, 0, getPointers(samples), NUM_LABELS);
cuv::ndarray<WeightType, cuv::dev_memory_space> histogram(node.getHistogram());
{
std::vector<std::vector<const PixelInstance*> > batches = featureFunction.prepare(getPointers(samples), node,
cuv::dev_memory_space(), false);
cuv::ndarray<FeatureResponseType, cuv::host_memory_space> featureResponses;
cuv::ndarray<WeightType, cuv::dev_memory_space> counters =
featureFunction.calculateFeatureResponsesAndHistograms(node, batches, featuresAndThresholds,
&featureResponses);
BOOST_CHECK_CLOSE(0, static_cast<FeatureResponseType>(featureResponses(0, 0)), 0);
checkCounters(configuration, counters, samples);
assert(static_cast<int>(samples.size()) == NUM_LABELS);
for (size_t label = 0; label < samples.size(); label++) {
BOOST_CHECK_EQUAL(0, static_cast<int>(counters(0, 0, label, 0)));
BOOST_CHECK_EQUAL(1, static_cast<int>(counters(0, 0, label, 1)));
BOOST_CHECK_EQUAL(1, static_cast<int>(counters(0, 1, label, 0)));
BOOST_CHECK_EQUAL(0, static_cast<int>(counters(0, 1, label, 1)));
BOOST_CHECK_EQUAL(1, static_cast<int>(counters(0, 2, label, 0)));
BOOST_CHECK_EQUAL(0, static_cast<int>(counters(0, 2, label, 1)));
}
}
images[0].reset();
clearImageCache();
images[0].setColor(7, 2, 0, 0.5f);
#if DUMP_IMAGE
images[0].dump(std::cout);
#endif
images[0].calculateIntegral();
#if DUMP_IMAGE
std::cout << "integral" << std::endl;
images[0].dump(std::cout);
#endif
{
std::vector<std::vector<const PixelInstance*> > batches = featureFunction.prepare(getPointers(samples),
node, cuv::dev_memory_space(), false);
cuv::ndarray<FeatureResponseType, cuv::host_memory_space> featureResponses;
cuv::ndarray<WeightType, cuv::dev_memory_space> counters =
featureFunction.calculateFeatureResponsesAndHistograms(node, batches, featuresAndThresholds,
&featureResponses);
checkCounters(configuration, counters, samples);
BOOST_CHECK_CLOSE(0.5, static_cast<FeatureResponseType>(featureResponses(0, 0)), 0);
for (size_t label = 0; label < samples.size(); label++) {
BOOST_CHECK_EQUAL(0, static_cast<int>(counters(0, 0, label, 0)));
BOOST_CHECK_EQUAL(1, static_cast<int>(counters(0, 0, label, 1)));
BOOST_CHECK_EQUAL(0, static_cast<int>(counters(0, 1, label, 0)));
BOOST_CHECK_EQUAL(1, static_cast<int>(counters(0, 1, label, 1)));
BOOST_CHECK_EQUAL(1, static_cast<int>(counters(0, 2, label, 0)));
BOOST_CHECK_EQUAL(0, static_cast<int>(counters(0, 2, label, 1)));
}
}
images[0].reset();
clearImageCache();
for (int y = 0; y < height; y++) {
for (int x = 0; x < width; x++) {
images[0].setColor(x, y, 0, 1.0f);
}
}
images[0].calculateIntegral();
#if DUMP_IMAGE
images[0].dump(std::cout);
#endif
{
std::vector<std::vector<const PixelInstance*> > batches = featureFunction.prepare(getPointers(samples),
node, cuv::dev_memory_space(), false);
cuv::ndarray<FeatureResponseType, cuv::host_memory_space> featureResponses;
cuv::ndarray<WeightType, cuv::dev_memory_space> counters =
featureFunction.calculateFeatureResponsesAndHistograms(node, batches, featuresAndThresholds,
&featureResponses);
BOOST_CHECK_CLOSE(8 * 4 - 4 * 2.0, static_cast<FeatureResponseType>(featureResponses(0, 0)), 0);
checkCounters(configuration, counters, samples);
for (size_t label = 0; label < samples.size(); label++) {
BOOST_CHECK_EQUAL(0, static_cast<int>(counters(0, 0, label, 0)));
BOOST_CHECK_EQUAL(1, static_cast<int>(counters(0, 0, label, 1)));
BOOST_CHECK_EQUAL(0, static_cast<int>(counters(0, 1, label, 0)));
BOOST_CHECK_EQUAL(1, static_cast<int>(counters(0, 1, label, 1)));
BOOST_CHECK_EQUAL(0, static_cast<int>(counters(0, 2, label, 0)));
BOOST_CHECK_EQUAL(1, static_cast<int>(counters(0, 2, label, 1)));
}
}
}
BOOST_AUTO_TEST_CASE(testColorFeatureComplex) {
const size_t NUM_THRESH = 2;
const size_t NUM_FEAT = 3;
const int maxImages = 5;
const int imageCacheSize = 5; // make sure the cache is at least as big as #images
unsigned int samplesPerImage = 500;
unsigned int minSampleCount = 32;
int maxDepth = 15;
uint16_t boxRadius = 127;
uint16_t regionSize = 16;
static const int NUM_THREADS = 1;
unsigned int maxSamplesPerBatch = 5000;
AccelerationMode accelerationMode = GPU_AND_CPU_COMPARE;
TrainingConfiguration configuration(SEED, samplesPerImage, NUM_FEAT, minSampleCount, maxDepth, boxRadius,
regionSize, NUM_THRESH, NUM_THREADS, maxImages, imageCacheSize, maxSamplesPerBatch, accelerationMode);
ImageFeatureEvaluation featureFunction(0, configuration);
const int width = 12;
const int height = 15;
std::vector<RGBDImage> images(2, RGBDImage(width, height));
std::vector<PixelInstance> samples;
for (int y = 0; y < height; y++) {
for (int x = 0; x < width; x++) {
for (int c = 0; c < 3; c++) {
float v = 100 * c + y * width + x;
images[0].setColor(x, y, c, v);
images[1].setColor(x, y, c, v / 2.0f);
}
}
}
BOOST_CHECK_CLOSE(179, images[0].getColor(width - 1, height - 1, 0), 0);
BOOST_CHECK_CLOSE(279, images[0].getColor(width - 1, height - 1, 1), 0);
BOOST_CHECK_CLOSE(379, images[0].getColor(width - 1, height - 1, 2), 0);
BOOST_CHECK_CLOSE(179 / 2.0, images[1].getColor(width - 1, height - 1, 0), 0);
BOOST_CHECK_CLOSE(279 / 2.0, images[1].getColor(width - 1, height - 1, 1), 0);
BOOST_CHECK_CLOSE(379 / 2.0, images[1].getColor(width - 1, height - 1, 2), 0);
images[0].calculateIntegral();
images[1].calculateIntegral();
BOOST_CHECK_CLOSE(16110, images[0].getColor(width - 1, height - 1, 0), 0);
BOOST_CHECK_CLOSE(34110, images[0].getColor(width - 1, height - 1, 1), 0);
BOOST_CHECK_CLOSE(52110, images[0].getColor(width - 1, height - 1, 2), 0);
BOOST_CHECK_CLOSE(16110 / 2.0, images[1].getColor(width - 1, height - 1, 0), 0);
BOOST_CHECK_CLOSE(34110 / 2.0, images[1].getColor(width - 1, height - 1, 1), 0);
BOOST_CHECK_CLOSE(52110 / 2.0, images[1].getColor(width - 1, height - 1, 2), 0);
ImageFeaturesAndThresholds<cuv::dev_memory_space> featuresAndThresholds(NUM_FEAT, NUM_THRESH,
boost::make_shared<cuv::default_allocator>());
featuresAndThresholds.types()[0] = COLOR;
featuresAndThresholds.offset1X()[0] = 2;
featuresAndThresholds.offset1Y()[0] = -1;
featuresAndThresholds.region1X()[0] = 2;
featuresAndThresholds.region1Y()[0] = 1;
featuresAndThresholds.offset2X()[0] = -3;
featuresAndThresholds.offset2Y()[0] = 4;
featuresAndThresholds.region2X()[0] = 1;
featuresAndThresholds.region2Y()[0] = 2;
featuresAndThresholds.channel1()[0] = 0;
featuresAndThresholds.channel2()[0] = 2;
featuresAndThresholds.types()[1] = COLOR;
featuresAndThresholds.offset1X()[1] = 2;
featuresAndThresholds.offset1Y()[1] = -1;
featuresAndThresholds.region1X()[1] = 2;
featuresAndThresholds.region1Y()[1] = 2;
featuresAndThresholds.offset2X()[1] = -3;
featuresAndThresholds.offset2Y()[1] = 4;
featuresAndThresholds.region2X()[1] = 1;
featuresAndThresholds.region2Y()[1] = 1;
featuresAndThresholds.channel1()[1] = 1;
featuresAndThresholds.channel2()[1] = 2;
featuresAndThresholds.types()[2] = COLOR;
featuresAndThresholds.offset1X()[2] = -2;
featuresAndThresholds.offset1Y()[2] = 1;
featuresAndThresholds.region1X()[2] = 3;
featuresAndThresholds.region1Y()[2] = 1;
featuresAndThresholds.offset2X()[2] = 3;
featuresAndThresholds.offset2Y()[2] = -4;
featuresAndThresholds.region2X()[2] = 3;
featuresAndThresholds.region2Y()[2] = 3;
featuresAndThresholds.channel1()[2] = 1;
featuresAndThresholds.channel2()[2] = 0;
featuresAndThresholds.thresholds()(0, 0) = 0.0f;
featuresAndThresholds.thresholds()(1, 0) = -500.0f;
featuresAndThresholds.thresholds()(0, 1) = -300.0f;
featuresAndThresholds.thresholds()(1, 1) = 0.0f;
featuresAndThresholds.thresholds()(0, 2) = 0.0f;
featuresAndThresholds.thresholds()(1, 2) = 500.0f;
const int NUM_LABELS = 2;
samples.push_back(PixelInstance(&images[0], 0, Depth(1.0), 6, 4));
samples.push_back(PixelInstance(&images[1], 0, Depth(2.0), 6, 4));
samples.push_back(PixelInstance(&images[0], 1, Depth(1.5), 5, 5));
samples.push_back(PixelInstance(&images[1], 1, Depth(3.1), 3, 4));
RandomTree<PixelInstance, ImageFeatureFunction> node(0, 0, getPointers(samples), NUM_LABELS);
cuv::ndarray<WeightType, cuv::dev_memory_space> histogram(node.getHistogram());
// 2 images, 3 features, 4 samples
std::vector<std::vector<const PixelInstance*> > batches = featureFunction.prepare(getPointers(samples),
node, cuv::dev_memory_space(), false);
cuv::ndarray<FeatureResponseType, cuv::host_memory_space> featureResponses;
cuv::ndarray<WeightType, cuv::dev_memory_space> counters =
featureFunction.calculateFeatureResponsesAndHistograms(node, batches, featuresAndThresholds,
&featureResponses);
cuv::ndarray<ScoreType, cuv::host_memory_space> scores(
featureFunction.calculateScores(counters,
featuresAndThresholds, histogram));
BOOST_CHECK_EQUAL(2, static_cast<int>(featureResponses.ndim()));
BOOST_CHECK_EQUAL(3, static_cast<int>(featureResponses.shape(0)));
BOOST_CHECK_EQUAL(4, static_cast<int>(featureResponses.shape(1)));
checkScores(scores, NUM_FEAT, NUM_THRESH);
// values verified by manual calculation
// sample 0, feat 0
BOOST_CHECK_CLOSE(-2040, static_cast<FeatureResponseType>(featureResponses(0, 0)), 0);
// sample 0, feat 1
BOOST_CHECK_CLOSE(1186, static_cast<FeatureResponseType>(featureResponses(1, 0)), 0);
// sample 0, feat 2
BOOST_CHECK(isnan(static_cast<FeatureResponseType>(featureResponses(2, 0))));
// sample 1, feat 0
BOOST_CHECK_CLOSE(-444, static_cast<FeatureResponseType>(featureResponses(0, 1)), 0);
// sample 1, feat 1
BOOST_CHECK_CLOSE(-244, static_cast<FeatureResponseType>(featureResponses(1, 1)), 0);
// sample 1, feat 2
BOOST_CHECK_CLOSE(244, static_cast<FeatureResponseType>(featureResponses(2, 1)), 0);
// sample 2, feat 0
BOOST_CHECK_CLOSE(-884, static_cast<FeatureResponseType>(featureResponses(0, 2)), 0);
// sample 2, feat 1
BOOST_CHECK_CLOSE(-484, static_cast<FeatureResponseType>(featureResponses(1, 2)), 0);
// sample 2, feat 2
BOOST_CHECK_CLOSE(572, static_cast<FeatureResponseType>(featureResponses(2, 2)), 0);
// sample 3, feat 0
BOOST_CHECK_CLOSE(-424, static_cast<FeatureResponseType>(featureResponses(0, 3)), 0);
// sample 3, feat 1
BOOST_CHECK_CLOSE(-224, static_cast<FeatureResponseType>(featureResponses(1, 3)), 0);
// sample 3, feat 2
BOOST_CHECK_CLOSE(224, static_cast<FeatureResponseType>(featureResponses(2, 3)), 0);
checkCounters(configuration, counters, samples);
// -2040 sample 0, feat 0 → 0
// -444 sample 1, feat 0 → 0
// -884 sample 2, feat 0 → 1
// -424 sample 3, feat 0 → 1
// 1186 sample 0, feat 1 → 0
// -244 sample 1, feat 1 → 0
// -484 sample 2, feat 1 → 1
// -224 sample 3, feat 1 → 1
// 1551 sample 0, feat 2 → 0
// 244 sample 1, feat 2 → 0
// 572 sample 2, feat 2 → 1
// 224 sample 3, feat 2 → 1
// feat 0, thresh 0 (0.0f)
BOOST_CHECK_EQUAL(2, static_cast<int>(counters(0, 0, 0, 0)));
BOOST_CHECK_EQUAL(0, static_cast<int>(counters(0, 0, 0, 1)));
BOOST_CHECK_EQUAL(2, static_cast<int>(counters(0, 0, 1, 0)));
BOOST_CHECK_EQUAL(0, static_cast<int>(counters(0, 0, 1, 1)));
// feat 0, thresh 1 (-500.0f)
BOOST_CHECK_EQUAL(1, static_cast<int>(counters(0, 1, 0, 0)));
BOOST_CHECK_EQUAL(1, static_cast<int>(counters(0, 1, 0, 1)));
BOOST_CHECK_EQUAL(1, static_cast<int>(counters(0, 1, 1, 0)));
BOOST_CHECK_EQUAL(1, static_cast<int>(counters(0, 1, 1, 1)));
// feat 1, thresh 0 (-300.0f)
BOOST_CHECK_EQUAL(0, static_cast<int>(counters(1, 0, 0, 0)));
BOOST_CHECK_EQUAL(2, static_cast<int>(counters(1, 0, 0, 1)));
BOOST_CHECK_EQUAL(1, static_cast<int>(counters(1, 0, 1, 0)));
BOOST_CHECK_EQUAL(1, static_cast<int>(counters(1, 0, 1, 1)));
// feat 1, thresh 1 (0.0f)
BOOST_CHECK_EQUAL(1, static_cast<int>(counters(1, 1, 0, 0)));
BOOST_CHECK_EQUAL(1, static_cast<int>(counters(1, 1, 0, 1)));
BOOST_CHECK_EQUAL(2, static_cast<int>(counters(1, 1, 1, 0)));
BOOST_CHECK_EQUAL(0, static_cast<int>(counters(1, 1, 1, 1)));
// feat 2, thresh 0 (0.0f)
BOOST_CHECK_EQUAL(0, static_cast<int>(counters(2, 0, 0, 0)));
BOOST_CHECK_EQUAL(2, static_cast<int>(counters(2, 0, 0, 1)));
BOOST_CHECK_EQUAL(0, static_cast<int>(counters(2, 0, 1, 0)));
BOOST_CHECK_EQUAL(2, static_cast<int>(counters(2, 0, 1, 1)));
// feat 2, thresh 1 (+500.0f)
BOOST_CHECK_EQUAL(1, static_cast<int>(counters(2, 1, 0, 0)));
BOOST_CHECK_EQUAL(1, static_cast<int>(counters(2, 1, 0, 1)));
BOOST_CHECK_EQUAL(1, static_cast<int>(counters(2, 1, 1, 0)));
BOOST_CHECK_EQUAL(1, static_cast<int>(counters(2, 1, 1, 1)));
}
BOOST_AUTO_TEST_CASE(testColorFeatureManySamples) {
const int NUM_FEAT = 1000;
const int NUM_THRESH = 50;
unsigned int samplesPerImage = 100;
unsigned int minSampleCount = 32;
int maxDepth = 15;
uint16_t boxRadius = 127;
uint16_t regionSize = 50;
static const int NUM_THREADS = 1;
static const int maxImages = 5;
static const int imageCacheSize = 5;
unsigned int maxSamplesPerBatch = 100000;
AccelerationMode accelerationMode = GPU_AND_CPU_COMPARE;
TrainingConfiguration configuration(SEED, samplesPerImage, NUM_FEAT, minSampleCount, maxDepth, boxRadius,
regionSize, NUM_THRESH, NUM_THREADS, maxImages, imageCacheSize, maxSamplesPerBatch, accelerationMode);
ImageFeatureEvaluation featureFunction(0, configuration);
std::vector<PixelInstance> samples;
const int width = 640;
const int height = 480;
std::vector<RGBDImage> images(10, RGBDImage(width, height));
for (size_t image = 0; image < images.size(); image++) {
for (int y = 0; y < height; y++) {
for (int x = 0; x < width; x++) {
for (int c = 0; c < 3; c++) {
float v = 10000 * image + 100 * c + y * width + x;
images[image].setColor(x, y, c, v);
}
}
}
images[image].calculateIntegral();
}
const size_t NUM_LABELS = 10;
const int NUM_SAMPLES = samplesPerImage * images.size();
for (int i = 0; i < NUM_SAMPLES; i++) {
PixelInstance sample(
&images.at(i / (NUM_SAMPLES / images.size())), // image
i / 100, // label
Depth((i % 20) / 10.0 + 1.0), // depth
i % width, // x
i % height // y
);
samples.push_back(sample);
}
RandomTree<PixelInstance, ImageFeatureFunction> node(0, 0, getPointers(samples), NUM_LABELS);
cuv::ndarray<WeightType, cuv::dev_memory_space> histogram(node.getHistogram());
{
std::vector<std::vector<const PixelInstance*> > batches = featureFunction.prepare(getPointers(samples),
node, cuv::dev_memory_space(), false);
ImageFeaturesAndThresholds<cuv::dev_memory_space> featuresAndThresholds =
featureFunction.generateRandomFeatures(batches[0], configuration.getRandomSeed(),
true, cuv::dev_memory_space());
cuv::ndarray<WeightType, cuv::dev_memory_space> counters =
featureFunction.calculateFeatureResponsesAndHistograms(
node, batches, featuresAndThresholds);
cuv::ndarray<ScoreType, cuv::host_memory_space> scores(
featureFunction.calculateScores(counters,
featuresAndThresholds, histogram));
size_t scoreHash = checkScores(scores, NUM_FEAT, NUM_THRESH);
size_t counterHash = checkCounters(configuration, counters, samples);
// magic numbers; used to check for regressions
BOOST_CHECK_EQUAL(4437303196209240250lu, counterHash);
BOOST_CHECK_EQUAL(13702092111133522162lu, scoreHash);
}
}
static void checkNode(boost::shared_ptr<const RandomTree<PixelInstance, ImageFeatureFunction> > node,
const boost::shared_ptr<const TreeNodes>& treeData,
const SplitFunction<PixelInstance, ImageFeatureFunction>* split = 0) {
const size_t numLabels = node->getHistogram().size();
const size_t nodeNr = node->getNodeId();
assert(nodeNr - node->getTreeId() < treeData->numNodes());
TreeNodeData data = getTreeNode(nodeNr, treeData);
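// leaf nodes are stored with leftNodeOffset == -1, a NaN threshold and only their normalized
// histogram; split nodes additionally carry the threshold and the full feature description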
if (node->isLeaf()) {
BOOST_CHECK_EQUAL(-1, static_cast<int>(data.leftNodeOffset));
BOOST_CHECK(isnan(static_cast<float>(data.threshold)));
for (size_t label = 0; label < numLabels; label++) {
BOOST_CHECK_EQUAL(static_cast<float>(node->getNormalizedHistogram()[label]),
static_cast<float>(data.histogram(label)));
}
} else {
BOOST_REQUIRE(split);
const ImageFeatureFunction& feature = split->getFeature();
const float expectedThreshold = split->getThreshold();
const int expectedLeftNodeOffset = node->getLeft()->getNodeId() - node->getNodeId();
BOOST_CHECK_EQUAL(expectedLeftNodeOffset, static_cast<int>(data.leftNodeOffset));
BOOST_CHECK_EQUAL(expectedThreshold, static_cast<float>(data.threshold));
BOOST_CHECK_EQUAL(static_cast<int>(feature.getType()), static_cast<int>(data.type));
BOOST_CHECK_EQUAL(feature.getOffset1().getX(), static_cast<int>(data.offset1X));
BOOST_CHECK_EQUAL(feature.getOffset1().getY(), static_cast<int>(data.offset1Y));
BOOST_CHECK_EQUAL(feature.getRegion1().getX(), static_cast<int>(data.region1X));
BOOST_CHECK_EQUAL(feature.getRegion1().getY(), static_cast<int>(data.region1Y));
BOOST_CHECK_EQUAL(feature.getOffset2().getX(), static_cast<int>(data.offset2X));
BOOST_CHECK_EQUAL(feature.getOffset2().getY(), static_cast<int>(data.offset2Y));
BOOST_CHECK_EQUAL(feature.getRegion2().getX(), static_cast<int>(data.region2X));
BOOST_CHECK_EQUAL(feature.getRegion2().getY(), static_cast<int>(data.region2Y));
BOOST_CHECK_EQUAL(feature.getChannel1(), static_cast<uint8_t>(data.channel1));
BOOST_CHECK_EQUAL(feature.getChannel2(), static_cast<uint8_t>(data.channel2));
}
}
BOOST_AUTO_TEST_CASE(testRecallOnGPU) {
const int NUM_FEAT = 1000;
const int NUM_THRESH = 50;
unsigned int samplesPerImage = 100;
unsigned int minSampleCount = 32;
int maxDepth = 15;
uint16_t boxRadius = 127;
uint16_t regionSize = 50;
static const int NUM_THREADS = 1;
static const int maxImages = 5;
static const int imageCacheSize = 5;
unsigned int maxSamplesPerBatch = 100000;
AccelerationMode accelerationMode = GPU_AND_CPU_COMPARE;
TrainingConfiguration configuration(SEED, samplesPerImage, NUM_FEAT, minSampleCount, maxDepth, boxRadius,
regionSize, NUM_THRESH, NUM_THREADS, maxImages, imageCacheSize, maxSamplesPerBatch, accelerationMode);
ImageFeatureEvaluation featureFunction(0, configuration);
std::vector<PixelInstance> samples;
const int width = 640;
const int height = 480;
std::vector<RGBDImage> images(10, RGBDImage(width, height));
for (size_t image = 0; image < images.size(); image++) {
for (int y = 0; y < height; y++) {
for (int x = 0; x < width; x++) {
for (int c = 0; c < 3; c++) {
float v = 10000 * image + 100 * c + y * width + x;
images[image].setColor(x, y, c, v);
}
}
}
images[image].calculateIntegral();
}
const size_t NUM_LABELS = 3;
const int NUM_SAMPLES = samplesPerImage * images.size();
for (int i = 0; i < NUM_SAMPLES; i++) {
PixelInstance sample(
&images.at(i / (NUM_SAMPLES / images.size())), // image
(i / 100) % NUM_LABELS, // label
Depth((i % 20) / 10.0 + 0.1), // depth
i % width, // x
i % height // y
);
samples.push_back(sample);
}
/**
* n0
* / \
* / \
* n1 n2
* / \
* / \
* n3 n4
*/
boost::shared_ptr<RandomTree<PixelInstance, ImageFeatureFunction> > n0 =
boost::make_shared<RandomTree<PixelInstance, ImageFeatureFunction> >(0, 0, getPointers(samples),
NUM_LABELS);
std::vector<WeightType> histN1(NUM_LABELS, 0);
histN1[0] = 10;
histN1[1] = 10;
histN1[2] = 10;
boost::shared_ptr<RandomTree<PixelInstance, ImageFeatureFunction> > n1 =
boost::make_shared<RandomTree<PixelInstance, ImageFeatureFunction> >(1, 1, n0, histN1);
std::vector<WeightType> histN2(NUM_LABELS, 0);
histN2[0] = 60;
histN2[1] = 60;
histN2[2] = 20;
boost::shared_ptr<RandomTree<PixelInstance, ImageFeatureFunction> > n2 =
boost::make_shared<RandomTree<PixelInstance, ImageFeatureFunction> >(2, 1, n0, histN2);
std::vector<WeightType> histN3(NUM_LABELS, 0);
histN3[0] = 10;
histN3[2] = 20;
boost::shared_ptr<RandomTree<PixelInstance, ImageFeatureFunction> > n3 =
boost::make_shared<RandomTree<PixelInstance, ImageFeatureFunction> >(3, 2, n1, histN3);
std::vector<WeightType> histN4(NUM_LABELS, 0);
histN4[1] = 50;
histN4[2] = 20;
boost::shared_ptr<RandomTree<PixelInstance, ImageFeatureFunction> > n4 =
boost::make_shared<RandomTree<PixelInstance, ImageFeatureFunction> >(4, 2, n1, histN4);
size_t featureId1 = 1;
float threshold1 = 28.391;
ScoreType score1 = 0.392;
ImageFeatureFunction feature1(COLOR,
Offset(-10, 5), Region(7, 3), 1,
Offset(27, -19), Region(65, 73), 2);
SplitFunction<PixelInstance, ImageFeatureFunction> split1(featureId1, feature1, threshold1, score1);
n1->addChildren(split1, n3, n4);
size_t featureId2 = 2;
float threshold2 = -29.1245;
ScoreType score2 = 0.9371;
ImageFeatureFunction feature2(DEPTH,
Offset(-18, 25), Region(4, 19), 0,
Offset(9, 28), Region(1, 16), 0);
SplitFunction<PixelInstance, ImageFeatureFunction> split2(featureId2, feature2, threshold2, score2);
n0->addChildren(split2, n1, n2);
BOOST_CHECK(n0->isRoot());
BOOST_CHECK_EQUAL(5, n0->countNodes());
cuv::ndarray<WeightType, cuv::host_memory_space> classLabelPriorDistribution(NUM_LABELS);
for (size_t i = 0; i < NUM_LABELS; i++) {
classLabelPriorDistribution[i] = 100;
}
boost::shared_ptr<RandomTreeImage> randomTreeImage = boost::make_shared<RandomTreeImage>(n0, configuration,
classLabelPriorDistribution);
randomTreeImage->normalizeHistograms(0.0);
boost::shared_ptr<const TreeNodes> treeData = convertTree(randomTreeImage);
BOOST_CHECK_EQUAL(n0->countNodes(), static_cast<size_t>(treeData->numNodes()));
BOOST_CHECK_EQUAL(n0->getNumClasses(), static_cast<size_t>(treeData->numLabels()));
checkNode(n0, treeData, &split2);
checkNode(n1, treeData, &split1);
BOOST_REQUIRE(n2->isLeaf());
BOOST_REQUIRE(n3->isLeaf());
BOOST_REQUIRE(n4->isLeaf());
checkNode(n2, treeData);
checkNode(n3, treeData);
checkNode(n4, treeData);
// do classify
const size_t treeCacheSize = 3;
{
RGBDImage image(640, 480);
image.calculateIntegral();
{
utils::Profile classifyImageTimer("classifyImage");
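// the output buffer holds one plane of per-pixel values per label: extents are [label][row][column]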
cuv::ndarray<float, cuv::dev_memory_space> output(
cuv::extents[NUM_LABELS][image.getHeight()][image.getWidth()]);
cudaSafeCall(cudaMemset(output.ptr(), 0, static_cast<size_t>(output.size() * sizeof(float))));
classifyImage(treeCacheSize, output, image, NUM_LABELS, treeData);
}
}
}
BOOST_AUTO_TEST_CASE(testRecallLargeForest) {
unsigned int samplesPerImage = 100;
const int NUM_FEAT = 1000;
const int NUM_THRESH = 50;
unsigned int minSampleCount = 32;
int maxDepth = 15;
uint16_t boxRadius = 127;
uint16_t regionSize = 50;
static const int NUM_THREADS = 1;
static const int maxImages = 5;
static const int imageCacheSize = 5;
unsigned int maxSamplesPerBatch = 100000;
AccelerationMode accelerationMode = GPU_AND_CPU_COMPARE;
TrainingConfiguration configuration(SEED, samplesPerImage, NUM_FEAT, minSampleCount, maxDepth, boxRadius,
regionSize, NUM_THRESH, NUM_THREADS, maxImages, imageCacheSize, maxSamplesPerBatch, accelerationMode);
std::vector<PixelInstance> samples;
const size_t NUM_LABELS = 3;
const int width = 640;
const int height = 480;
std::vector<RGBDImage> images(10, RGBDImage(width, height));
const int NUM_SAMPLES = samplesPerImage * images.size();
for (int i = 0; i < NUM_SAMPLES; i++) {
PixelInstance sample(
&images.at(i / (NUM_SAMPLES / images.size())), // image
(i / 100) % NUM_LABELS, // label
Depth((i % 20) / 10.0 + 0.1), // depth
i % width, // x
i % height // y
);
samples.push_back(sample);
}
// explicitly test a tree that exceeds the maximal number of nodes per layer
const size_t numNodes[] = { 10, 100, 3 * NODES_PER_TREE_LAYER + 2 };
Sampler sampler(4711, 0, 1000);
Sampler typeSampler(4711, 0, 1);
Sampler channelSampler(4711, 0, 5);
Sampler offsetSampler(4711, -120, 120);
Sampler regionSampler(4711, 0, 20);
for (size_t treeId = 0; treeId < 3; treeId++) {
boost::shared_ptr<RandomTree<PixelInstance, ImageFeatureFunction> > rootNode =
boost::make_shared<RandomTree<PixelInstance, ImageFeatureFunction> >(treeId, 0, getPointers(samples),
NUM_LABELS);
std::vector<boost::shared_ptr<RandomTree<PixelInstance, ImageFeatureFunction> > > nodes;
std::map<boost::shared_ptr<RandomTree<PixelInstance, ImageFeatureFunction> >,
SplitFunction<PixelInstance, ImageFeatureFunction> > splits;
nodes.push_back(rootNode);
boost::shared_ptr<RandomTree<PixelInstance, ImageFeatureFunction> > previousNode = rootNode;
/**
* creates a degenerate tree with N nodes
*
* n0
* / \
* / \
* n1 n2
* / \
* / \
* n3 n4
* /
* /
* n5
* / \
* / \
* n6 n7
* /
* /
* ...
*/
for (size_t nodeId = 1; nodeId < numNodes[treeId]; nodeId += 2) {
const size_t level = (nodeId + 1) / 2;
assert(level > 0 && level < numNodes[treeId]);
boost::shared_ptr<RandomTree<PixelInstance, ImageFeatureFunction> > leftNode =
boost::make_shared<RandomTree<PixelInstance, ImageFeatureFunction> >(nodeId + treeId, level,
getPointers(samples), NUM_LABELS, previousNode);
boost::shared_ptr<RandomTree<PixelInstance, ImageFeatureFunction> > rightNode =
boost::make_shared<RandomTree<PixelInstance, ImageFeatureFunction> >(nodeId + 1 + treeId, level,
getPointers(samples), NUM_LABELS, previousNode);
size_t featureId = sampler.getNext();
float threshold = sampler.getNext() / 200.0 - 100.0;
ScoreType score = sampler.getNext() / 1000.0;
assertProbability(score);
ImageFeatureFunction feature(static_cast<FeatureType>(typeSampler.getNext()),
Offset(offsetSampler.getNext(), offsetSampler.getNext()),
Region(regionSampler.getNext(), regionSampler.getNext()),
channelSampler.getNext(),
Offset(offsetSampler.getNext(), offsetSampler.getNext()),
Region(regionSampler.getNext(), regionSampler.getNext()),
channelSampler.getNext());
SplitFunction<PixelInstance, ImageFeatureFunction> split(featureId, feature, threshold, score);
previousNode->addChildren(split, leftNode, rightNode);
splits[previousNode] = split;
nodes.push_back(leftNode);
nodes.push_back(rightNode);
previousNode = leftNode;
}
BOOST_CHECK_EQUAL(treeId, rootNode->getTreeId());
BOOST_CHECK(rootNode->isRoot());
BOOST_CHECK_EQUAL(numNodes[treeId] + 1, rootNode->countNodes());
cuv::ndarray<WeightType, cuv::host_memory_space> classLabelPriorDistribution(NUM_LABELS);
for (size_t i = 0; i < NUM_LABELS; i++) {
classLabelPriorDistribution[i] = 100;
}
boost::shared_ptr<RandomTreeImage> randomTreeImage =
boost::make_shared<RandomTreeImage>(rootNode, configuration, classLabelPriorDistribution);
randomTreeImage->normalizeHistograms(0.0);
boost::shared_ptr<const TreeNodes> treeData = convertTree(randomTreeImage);
BOOST_CHECK_EQUAL(rootNode->countNodes(), static_cast<size_t>(treeData->numNodes()));
BOOST_CHECK_EQUAL(rootNode->getNumClasses(), static_cast<size_t>(treeData->numLabels()));
CURFIL_INFO("checking nodes");
assert(nodes.size() == numNodes[treeId] + 1);
for (size_t nodeId = 0; nodeId < numNodes[treeId]; nodeId++) {
const boost::shared_ptr<RandomTree<PixelInstance, ImageFeatureFunction> > node = nodes[nodeId];
if (node->isLeaf()) {
checkNode(node, treeData);
} else {
checkNode(node, treeData, &splits[node]);
}
}
CURFIL_INFO("checked " << numNodes[treeId] << " nodes of tree " << treeId);
}
}
BOOST_AUTO_TEST_SUITE_END()
|
ca05a0f2423f9074751a269fdc9125c3610909e4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
swsharp - CUDA parallelized Smith Waterman with applying Hirschberg's and
Ukkonen's algorithm and dynamic cell pruning.
Copyright (C) 2013 Matija Korpar, contributor Mile Šikić
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Contact the author by [email protected].
*/
#ifdef __HIPCC__
#include <stdlib.h>
#include <stdio.h>
#include "chain.h"
#include "cuda_utils.h"
#include "constants.h"
#include "error.h"
#include "scorer.h"
#include "thread.h"
#include "utils.h"
#include "gpu_module.h"
#define MAX_THREADS MAX(THREADS_SM1, THREADS_SM2)
#define THREADS_SM1 64
#define BLOCKS_SM1 240
#define THREADS_SM2 128
#define BLOCKS_SM2 480
#define INT4_ZERO make_int4(0, 0, 0, 0)
#define SCORE4_MIN make_int4(SCORE_MIN, SCORE_MIN, SCORE_MIN, SCORE_MIN)
typedef struct Atom {
int mch;
int2 up;
int4 lScr;
int4 lAff;
int4 rScr;
int4 rAff;
} Atom;
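// vertical bus: per-row intermediate values (diagonal match score, score vector, affine-gap
// vector) carried over between successive kernel sweeps; the int2 horizontal bus (hBus) plays
// the same role along the row direction, with the score in .x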
typedef struct VBus {
int* mch;
int4* scr;
int4* aff;
} VBus;
typedef struct Context {
int* queryEnd;
int* targetEnd;
int* outScore;
Chain* query;
Chain* target;
Scorer* scorer;
int score;
int card;
} Context;
static __constant__ int gapOpen_;
static __constant__ int gapExtend_;
static __constant__ int rows_;
static __constant__ int cols_;
static __constant__ int cellWidth_;
static __constant__ int scorerLen_;
static __constant__ int subLen_;
static __constant__ int match_;
static __constant__ int mismatch_;
texture<char4> rowTexture;
texture<char> colTexture;
texture<int2> hBusTexture;
texture<int> subTexture;
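// textures provide cached, read-only access to the row and column sequences, the horizontal
// bus and the substitution matrix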
//******************************************************************************
// PUBLIC
extern void ovEndDataGpu(int* queryEnd, int* targetEnd, int* outScore,
Chain* query, Chain* target, Scorer* scorer, int score, int card,
Thread* thread);
//******************************************************************************
//******************************************************************************
// PRIVATE
// With visual c++ compiler and prototypes declared cuda global memory variables
// do not work. No questions asked.
#ifndef _WIN32
template<class Sub>
__device__ static void solveShortDelegated(int d, VBus vBus, int2* hBus,
int3* results, Sub sub);
template<class Sub>
__device__ static void solveShortNormal(int d, VBus vBus, int2* hBus,
int3* results, Sub sub);
template<class Sub>
__global__ static void solveShort(int d, VBus vBus, int2* hBus, int3* results,
Sub sub);
template<class Sub>
__global__ static void solveLong(int d, VBus vBus, int2* hBus, int3* results,
Sub sub);
#endif
static void* kernel(void* params);
//******************************************************************************
//******************************************************************************
// PUBLIC
extern void ovEndDataGpu(int* queryEnd, int* targetEnd, int* outScore,
Chain* query, Chain* target, Scorer* scorer, int score, int card,
Thread* thread) {
Context* param = (Context*) malloc(sizeof(Context));
param->queryEnd = queryEnd;
param->targetEnd = targetEnd;
param->outScore = outScore;
param->query = query;
param->target = target;
param->scorer = scorer;
param->score = score;
param->card = card;
if (thread == NULL) {
kernel(param);
} else {
threadCreate(thread, kernel, (void*) param);
}
}
//******************************************************************************
//******************************************************************************
// PRIVATE
//------------------------------------------------------------------------------
// FUNCTORS
class SubScalarRev {
public:
__device__ int operator () (char a, char b) {
return (a == b ? match_ : mismatch_) * (a < scorerLen_ && b < scorerLen_);
}
};
class SubVector {
public:
__device__ int operator () (char a, char b) {
return tex1Dfetch(subTexture, (a * subLen_) + b);
}
};
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// GPU KERNELS
template<class Sub>
__device__ static void solveShortDelegated(int d, VBus vBus, int2* hBus,
int3* results, Sub sub) {
__shared__ int hBusScrShr[MAX_THREADS];
__shared__ int hBusAffShr[MAX_THREADS];
int row = (d + blockIdx.x - gridDim.x + 1) * (blockDim.x * 4) + threadIdx.x * 4;
int col = cellWidth_ * (gridDim.x - blockIdx.x - 1) - threadIdx.x;
if (row < 0) return;
int3 res = { SCORE_MIN, 0, 0 };
if (col == 0) {
int rowPrev = row - gridDim.x * blockDim.x * 4;
if (0 <= rowPrev && rowPrev < rows_) {
int4 prev;
VEC4_ASSIGN(prev, vBus.scr[(row >> 2) % (gridDim.x * blockDim.x)]);
if (prev.x > res.x) { res.x = prev.x; res.y = rowPrev; res.z = cols_ - 1; }
if (prev.y > res.x) { res.x = prev.y; res.y = rowPrev + 1; res.z = cols_ - 1; }
if (prev.z > res.x) { res.x = prev.z; res.y = rowPrev + 2; res.z = cols_ - 1; }
if (prev.w > res.x) { res.x = prev.w; res.y = rowPrev + 3; res.z = cols_ - 1; }
}
}
row -= (col < 0) * (gridDim.x * blockDim.x * 4);
col += (col < 0) * cols_;
Atom atom;
if (0 <= row && row < rows_ && col > 0) {
atom.mch = vBus.mch[(row >> 2) % (gridDim.x * blockDim.x)];
VEC4_ASSIGN(atom.lScr, vBus.scr[(row >> 2) % (gridDim.x * blockDim.x)]);
VEC4_ASSIGN(atom.lAff, vBus.aff[(row >> 2) % (gridDim.x * blockDim.x)]);
} else {
atom.mch = 0;
VEC4_ASSIGN(atom.lScr, INT4_ZERO);
VEC4_ASSIGN(atom.lAff, SCORE4_MIN);
}
hBusScrShr[threadIdx.x] = tex1Dfetch(hBusTexture, col).x;
hBusAffShr[threadIdx.x] = tex1Dfetch(hBusTexture, col).y;
char4 rowCodes = tex1Dfetch(rowTexture, row >> 2);
int del;
for (int i = 0; i < blockDim.x; ++i) {
if (0 <= row && row < rows_) {
char columnCode = tex1Dfetch(colTexture, col);
if (threadIdx.x == 0) {
atom.up = tex1Dfetch(hBusTexture, col);
} else {
atom.up.x = hBusScrShr[threadIdx.x];
atom.up.y = hBusAffShr[threadIdx.x];
}
del = max(atom.up.x - gapOpen_, atom.up.y - gapExtend_);
int ins = max(atom.lScr.x - gapOpen_, atom.lAff.x - gapExtend_);
int mch = atom.mch + sub(columnCode, rowCodes.x);
atom.rScr.x = MAX3(mch, del, ins);
atom.rAff.x = ins;
del = max(atom.rScr.x - gapOpen_, del - gapExtend_);
ins = max(atom.lScr.y - gapOpen_, atom.lAff.y - gapExtend_);
mch = atom.lScr.x + sub(columnCode, rowCodes.y);
atom.rScr.y = MAX3(mch, del, ins);
atom.rAff.y = ins;
del = max(atom.rScr.y - gapOpen_, del - gapExtend_);
ins = max(atom.lScr.z - gapOpen_, atom.lAff.z - gapExtend_);
mch = atom.lScr.y + sub(columnCode, rowCodes.z);
atom.rScr.z = MAX3(mch, del, ins);
atom.rAff.z = ins;
del = max(atom.rScr.z - gapOpen_, del - gapExtend_);
ins = max(atom.lScr.w - gapOpen_, atom.lAff.w - gapExtend_);
mch = atom.lScr.z + sub(columnCode, rowCodes.w);
atom.rScr.w = MAX3(mch, del, ins);
atom.rAff.w = ins;
atom.mch = atom.up.x;
VEC4_ASSIGN(atom.lScr, atom.rScr);
VEC4_ASSIGN(atom.lAff, atom.rAff);
}
__syncthreads();
if (0 <= row && row < rows_) {
if (threadIdx.x == blockDim.x - 1 || i == blockDim.x - 1 || row == rows_ - 4) {
VEC2_ASSIGN(hBus[col], make_int2(atom.rScr.w, del));
} else {
hBusScrShr[threadIdx.x + 1] = atom.rScr.w;
hBusAffShr[threadIdx.x + 1] = del;
}
}
++col;
if (col == cols_) {
if (0 <= row && row < rows_) {
if (atom.rScr.x > res.x) { res.x = atom.rScr.x; res.y = row; res.z = col - 1; }
if (atom.rScr.y > res.x) { res.x = atom.rScr.y; res.y = row + 1; res.z = col - 1; }
if (atom.rScr.z > res.x) { res.x = atom.rScr.z; res.y = row + 2; res.z = col - 1; }
if (atom.rScr.w > res.x) { res.x = atom.rScr.w; res.y = row + 3; res.z = col - 1; }
}
col = 0;
row = row + gridDim.x * blockDim.x * 4;
atom.mch = 0;
VEC4_ASSIGN(atom.lScr, INT4_ZERO);
VEC4_ASSIGN(atom.lAff, SCORE4_MIN);
rowCodes = tex1Dfetch(rowTexture, row >> 2);
}
__syncthreads();
}
if (res.x > results[blockIdx.x * blockDim.x + threadIdx.x].x) {
VEC3_ASSIGN(results[blockIdx.x * blockDim.x + threadIdx.x], res);
}
if (row < 0 || row >= rows_) return;
vBus.mch[(row >> 2) % (gridDim.x * blockDim.x)] = atom.up.x;
VEC4_ASSIGN(vBus.scr[(row >> 2) % (gridDim.x * blockDim.x)], atom.lScr);
VEC4_ASSIGN(vBus.aff[(row >> 2) % (gridDim.x * blockDim.x)], atom.lAff);
}
template<class Sub>
__device__ static void solveShortNormal(int d, VBus vBus, int2* hBus,
int3* results, Sub sub) {
__shared__ int hBusScrShr[MAX_THREADS];
__shared__ int hBusAffShr[MAX_THREADS];
int row = (d + blockIdx.x - gridDim.x + 1) * (blockDim.x * 4) + threadIdx.x * 4;
int col = cellWidth_ * (gridDim.x - blockIdx.x - 1) - threadIdx.x;
if (row < 0 || row >= rows_) return;
Atom atom;
atom.mch = vBus.mch[(row >> 2) % (gridDim.x * blockDim.x)];
VEC4_ASSIGN(atom.lScr, vBus.scr[(row >> 2) % (gridDim.x * blockDim.x)]);
VEC4_ASSIGN(atom.lAff, vBus.aff[(row >> 2) % (gridDim.x * blockDim.x)]);
hBusScrShr[threadIdx.x] = tex1Dfetch(hBusTexture, col).x;
hBusAffShr[threadIdx.x] = tex1Dfetch(hBusTexture, col).y;
const char4 rowCodes = tex1Dfetch(rowTexture, row >> 2);
int del;
for (int i = 0; i < blockDim.x; ++i, ++col) {
char columnCode = tex1Dfetch(colTexture, col);
if (threadIdx.x == 0) {
atom.up = tex1Dfetch(hBusTexture, col);
} else {
atom.up = make_int2(hBusScrShr[threadIdx.x], hBusAffShr[threadIdx.x]);
}
del = max(atom.up.x - gapOpen_, atom.up.y - gapExtend_);
int ins = max(atom.lScr.x - gapOpen_, atom.lAff.x - gapExtend_);
int mch = atom.mch + sub(columnCode, rowCodes.x);
atom.rScr.x = MAX3(mch, del, ins);
atom.rAff.x = ins;
del = max(atom.rScr.x - gapOpen_, del - gapExtend_);
ins = max(atom.lScr.y - gapOpen_, atom.lAff.y - gapExtend_);
mch = atom.lScr.x + sub(columnCode, rowCodes.y);
atom.rScr.y = MAX3(mch, del, ins);
atom.rAff.y = ins;
del = max(atom.rScr.y - gapOpen_, del - gapExtend_);
ins = max(atom.lScr.z - gapOpen_, atom.lAff.z - gapExtend_);
mch = atom.lScr.y + sub(columnCode, rowCodes.z);
atom.rScr.z = MAX3(mch, del, ins);
atom.rAff.z = ins;
del = max(atom.rScr.z - gapOpen_, del - gapExtend_);
ins = max(atom.lScr.w - gapOpen_, atom.lAff.w - gapExtend_);
mch = atom.lScr.z + sub(columnCode, rowCodes.w);
atom.rScr.w = MAX3(mch, del, ins);
atom.rAff.w = ins;
atom.mch = atom.up.x;
VEC4_ASSIGN(atom.lScr, atom.rScr);
VEC4_ASSIGN(atom.lAff, atom.rAff);
__syncthreads();
if (threadIdx.x == blockDim.x - 1 || row == rows_ - 4) {
VEC2_ASSIGN(hBus[col], make_int2(atom.rScr.w, del));
} else {
hBusScrShr[threadIdx.x + 1] = atom.rScr.w;
hBusAffShr[threadIdx.x + 1] = del;
}
__syncthreads();
}
const int vBusIdx = (row >> 2) % (gridDim.x * blockDim.x);
vBus.mch[vBusIdx] = atom.up.x;
VEC4_ASSIGN(vBus.scr[vBusIdx], atom.lScr);
VEC4_ASSIGN(vBus.aff[vBusIdx], atom.lAff);
VEC2_ASSIGN(hBus[col - 1], make_int2(atom.rScr.w, del));
}
template<class Sub>
__global__ static void solveShort(int d, VBus vBus, int2* hBus, int3* results, Sub sub) {
if (blockIdx.x == (gridDim.x - 1)) {
solveShortDelegated(d, vBus, hBus, results, sub);
} else {
solveShortNormal(d, vBus, hBus, results, sub);
}
}
template<class Sub>
__global__ static void solveLong(int d, VBus vBus, int2* hBus, int3* results, Sub sub) {
__shared__ int hBusScrShr[MAX_THREADS];
__shared__ int hBusAffShr[MAX_THREADS];
hBusScrShr[threadIdx.x] = 0;
int row = (d + blockIdx.x - gridDim.x + 1) * (blockDim.x * 4) + threadIdx.x * 4;
int col = cellWidth_ * (gridDim.x - blockIdx.x - 1) - threadIdx.x + blockDim.x;
if (row < 0 || row >= rows_) return;
Atom atom;
atom.mch = vBus.mch[(row >> 2) % (gridDim.x * blockDim.x)];
VEC4_ASSIGN(atom.lScr, vBus.scr[(row >> 2) % (gridDim.x * blockDim.x)]);
VEC4_ASSIGN(atom.lAff, vBus.aff[(row >> 2) % (gridDim.x * blockDim.x)]);
hBusScrShr[threadIdx.x] = tex1Dfetch(hBusTexture, col).x;
hBusAffShr[threadIdx.x] = tex1Dfetch(hBusTexture, col).y;
const char4 rowCodes = tex1Dfetch(rowTexture, row >> 2);
int del;
for (int i = 0; i < cellWidth_ - blockDim.x; ++i, ++col) {
char columnCode = tex1Dfetch(colTexture, col);
if (threadIdx.x == 0) {
atom.up = tex1Dfetch(hBusTexture, col);
} else {
atom.up = make_int2(hBusScrShr[threadIdx.x], hBusAffShr[threadIdx.x]);
}
del = max(atom.up.x - gapOpen_, atom.up.y - gapExtend_);
int ins = max(atom.lScr.x - gapOpen_, atom.lAff.x - gapExtend_);
int mch = atom.mch + sub(columnCode, rowCodes.x);
atom.rScr.x = MAX3(mch, del, ins);
atom.rAff.x = ins;
del = max(atom.rScr.x - gapOpen_, del - gapExtend_);
ins = max(atom.lScr.y - gapOpen_, atom.lAff.y - gapExtend_);
mch = atom.lScr.x + sub(columnCode, rowCodes.y);
atom.rScr.y = MAX3(mch, del, ins);
atom.rAff.y = ins;
del = max(atom.rScr.y - gapOpen_, del - gapExtend_);
ins = max(atom.lScr.z - gapOpen_, atom.lAff.z - gapExtend_);
mch = atom.lScr.y + sub(columnCode, rowCodes.z);
atom.rScr.z = MAX3(mch, del, ins);
atom.rAff.z = ins;
del = max(atom.rScr.z - gapOpen_, del - gapExtend_);
ins = max(atom.lScr.w - gapOpen_, atom.lAff.w - gapExtend_);
mch = atom.lScr.z + sub(columnCode, rowCodes.w);
atom.rScr.w = MAX3(mch, del, ins);
atom.rAff.w = ins;
atom.mch = atom.up.x;
VEC4_ASSIGN(atom.lScr, atom.rScr);
VEC4_ASSIGN(atom.lAff, atom.rAff);
__syncthreads();
if (threadIdx.x == blockDim.x - 1 || row == rows_ - 4) {
VEC2_ASSIGN(hBus[col], make_int2(atom.rScr.w, del));
} else {
hBusScrShr[threadIdx.x + 1] = atom.rScr.w;
hBusAffShr[threadIdx.x + 1] = del;
}
__syncthreads();
}
const int vBusIdx = (row >> 2) % (gridDim.x * blockDim.x);
vBus.mch[vBusIdx] = atom.up.x;
VEC4_ASSIGN(vBus.scr[vBusIdx], atom.lScr);
VEC4_ASSIGN(vBus.aff[vBusIdx], atom.lAff);
VEC2_ASSIGN(hBus[col - 1], make_int2(atom.rScr.w, del));
}
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// CPU KERNELS
static void* kernel(void* params) {
Context* context = (Context*) params;
int* queryEnd = context->queryEnd;
int* targetEnd = context->targetEnd;
int* outScore = context->outScore;
Chain* query = context->query;
Chain* target = context->target;
Scorer* scorer = context->scorer;
// int score = context->score;
int card = context->card;
int currentCard;
CUDA_SAFE_CALL(hipGetDevice(&currentCard));
if (currentCard != card) {
// CUDA_SAFE_CALL(hipDeviceReset());
CUDA_SAFE_CALL(hipSetDevice(card));
}
int rows = chainGetLength(query);
int cols = chainGetLength(target);
int gapOpen = scorerGetGapOpen(scorer);
int gapExtend = scorerGetGapExtend(scorer);
int scorerLen = scorerGetMaxCode(scorer);
int subLen = scorerLen + 1;
int scalar = scorerIsScalar(scorer);
TIMER_START("Ov end data %d %d", rows, cols);
hipDeviceProp_t properties;
CUDA_SAFE_CALL(hipGetDeviceProperties(&properties, card));
int threads;
int blocks;
if (properties.major < 2) {
threads = THREADS_SM1;
blocks = BLOCKS_SM1;
} else {
threads = THREADS_SM2;
blocks = BLOCKS_SM2;
}
ASSERT(threads * 2 <= cols, "too short gpu target chain");
if (threads * blocks * 2 > cols) {
blocks = (int) (cols / (threads * 2.));
blocks = blocks <= 30 ? blocks : blocks - (blocks % 30);
// LOG("Blocks trimmed to: %d", blocks);
}
int cellHeight = 4 * threads;
int rowsGpu = rows + (4 - rows % 4) % 4;
int colsGpu = cols + (blocks - cols % blocks) % blocks;
int cellWidth = colsGpu / blocks;
int diagonals = blocks + (int) ceil((float) rowsGpu / cellHeight);
int memoryUsedGpu = 0;
int memoryUsedCpu = 0;
/*
LOG("Rows cpu: %d, gpu: %d", rows, rowsGpu);
LOG("Columns cpu: %d, gpu: %d", cols, colsGpu);
LOG("Cell h: %d, w: %d", cellHeight, cellWidth);
LOG("Diagonals: %d", diagonals);
*/
//**************************************************************************
// PADD CHAINS
char* rowCpu = (char*) malloc(rowsGpu * sizeof(char));
memset(rowCpu, scorerLen, (rowsGpu - rows) * sizeof(char));
chainCopyCodes(query, rowCpu + (rowsGpu - rows));
memoryUsedCpu += rowsGpu * sizeof(char);
char* colCpu = (char*) malloc(colsGpu * sizeof(char));
memset(colCpu, scorerLen, (colsGpu - cols) * sizeof(char));
chainCopyCodes(target, colCpu + (colsGpu - cols));
memoryUsedCpu += colsGpu * sizeof(char);
//**************************************************************************
//**************************************************************************
// INIT GPU
size_t rowSize = rowsGpu * sizeof(char);
char4* rowGpu;
CUDA_SAFE_CALL(hipMalloc(&rowGpu, rowSize));
CUDA_SAFE_CALL(hipMemcpy(rowGpu, rowCpu, rowSize, TO_GPU));
CUDA_SAFE_CALL(hipBindTexture(NULL, rowTexture, rowGpu, rowSize));
memoryUsedGpu += rowSize;
size_t colSize = colsGpu * sizeof(char);
char* colGpu;
CUDA_SAFE_CALL(hipMalloc(&colGpu, colSize));
CUDA_SAFE_CALL(hipMemcpy(colGpu, colCpu, colSize, TO_GPU));
CUDA_SAFE_CALL(hipBindTexture(NULL, colTexture, colGpu, colSize));
memoryUsedGpu += colSize;
size_t hBusSize = colsGpu * sizeof(int2);
int2* hBusCpu = (int2*) malloc(hBusSize);
int2* hBusGpu;
for (int i = 0; i < colsGpu; ++i) {
hBusCpu[i] = make_int2(0, SCORE_MIN);
}
CUDA_SAFE_CALL(hipMalloc(&hBusGpu, hBusSize));
CUDA_SAFE_CALL(hipMemcpy(hBusGpu, hBusCpu, hBusSize, TO_GPU));
CUDA_SAFE_CALL(hipBindTexture(NULL, hBusTexture, hBusGpu, hBusSize));
memoryUsedCpu += hBusSize;
memoryUsedGpu += hBusSize;
VBus vBusGpu;
CUDA_SAFE_CALL(hipMalloc(&vBusGpu.mch, blocks * threads * sizeof(int)));
CUDA_SAFE_CALL(hipMalloc(&vBusGpu.scr, blocks * threads * sizeof(int4)));
CUDA_SAFE_CALL(hipMalloc(&vBusGpu.aff, blocks * threads * sizeof(int4)));
memoryUsedGpu += blocks * threads * sizeof(int);
memoryUsedGpu += blocks * threads * sizeof(int4);
memoryUsedGpu += blocks * threads * sizeof(int4);
size_t resultsSize = blocks * threads * sizeof(int3);
int3* resultsCpu = (int3*) malloc(resultsSize);
int3* resultsGpu;
for (int i = 0; i < blocks * threads; ++i) {
resultsCpu[i] = make_int3(SCORE_MIN, 0, 0);
}
CUDA_SAFE_CALL(hipMalloc(&resultsGpu, resultsSize));
CUDA_SAFE_CALL(hipMemcpy(resultsGpu, resultsCpu, resultsSize, TO_GPU));
memoryUsedCpu += resultsSize;
memoryUsedGpu += resultsSize;
size_t subSize = subLen * subLen * sizeof(int);
int* subCpu = (int*) malloc(subSize);
int* subGpu;
for (int i = 0; i < subLen; ++i) {
for (int j = 0; j < subLen; ++j) {
if (i < scorerLen && j < scorerLen) {
subCpu[i * subLen + j] = scorerScore(scorer, i, j);
} else {
subCpu[i * subLen + j] = 0;
}
}
}
CUDA_SAFE_CALL(hipMalloc(&subGpu, subSize));
CUDA_SAFE_CALL(hipMemcpy(subGpu, subCpu, subSize, TO_GPU));
CUDA_SAFE_CALL(hipBindTexture(NULL, subTexture, subGpu, subSize));
memoryUsedCpu += subSize;
memoryUsedGpu += subSize;
CUDA_SAFE_CALL(hipMemcpyToSymbol(match_, &(subCpu[0]), sizeof(int)));
CUDA_SAFE_CALL(hipMemcpyToSymbol(mismatch_, &(subCpu[1]), sizeof(int)));
CUDA_SAFE_CALL(hipMemcpyToSymbol(gapOpen_, &gapOpen, sizeof(int)));
CUDA_SAFE_CALL(hipMemcpyToSymbol(gapExtend_, &gapExtend, sizeof(int)));
CUDA_SAFE_CALL(hipMemcpyToSymbol(scorerLen_, &scorerLen, sizeof(int)));
CUDA_SAFE_CALL(hipMemcpyToSymbol(subLen_, &subLen, sizeof(int)));
CUDA_SAFE_CALL(hipMemcpyToSymbol(rows_, &rowsGpu, sizeof(int)));
CUDA_SAFE_CALL(hipMemcpyToSymbol(cols_, &colsGpu, sizeof(int)));
CUDA_SAFE_CALL(hipMemcpyToSymbol(cellWidth_, &cellWidth, sizeof(int)));
// LOG("Memory used CPU: %fMB", memoryUsedCpu / 1024. / 1024.);
LOG("Memory used GPU: %fMB", memoryUsedGpu / 1024. / 1024.);
//**************************************************************************
//**************************************************************************
// KERNEL RUN
// TIMER_START("Kernel");
for (int diagonal = 0; diagonal < diagonals; ++diagonal) {
if (scalar) {
hipLaunchKernelGGL(( solveShort), dim3(blocks), dim3(threads) , 0, 0, diagonal, vBusGpu, hBusGpu, resultsGpu, SubScalarRev());
hipLaunchKernelGGL(( solveLong), dim3(blocks), dim3(threads) , 0, 0, diagonal, vBusGpu, hBusGpu, resultsGpu, SubScalarRev());
} else {
hipLaunchKernelGGL(( solveShort), dim3(blocks), dim3(threads) , 0, 0, diagonal, vBusGpu, hBusGpu, resultsGpu, SubVector());
hipLaunchKernelGGL(( solveLong), dim3(blocks), dim3(threads) , 0, 0, diagonal, vBusGpu, hBusGpu, resultsGpu, SubVector());
}
}
// TIMER_STOP;
//**************************************************************************
//**************************************************************************
// SAVE RESULTS
CUDA_SAFE_CALL(hipMemcpy(hBusCpu, hBusGpu, hBusSize, FROM_GPU));
CUDA_SAFE_CALL(hipMemcpy(resultsCpu, resultsGpu, resultsSize, FROM_GPU));
int3 res = resultsCpu[0];
for (int i = 1; i < blocks * threads; ++i) {
if (resultsCpu[i].x > res.x) {
res = resultsCpu[i];
}
}
for (int i = colsGpu - cols; i < colsGpu; ++i) {
if (hBusCpu[i].x > res.x) {
res.x = hBusCpu[i].x;
res.y = rowsGpu - 1;
res.z = i;
}
}
// restore padding
res.y -= (rowsGpu - rows);
res.z -= (colsGpu - cols);
*outScore = res.x;
*queryEnd = res.y;
*targetEnd = res.z;
LOG("Score: %d, (%d, %d)", *outScore, *queryEnd, *targetEnd);
ASSERT(res.y == rows - 1 || res.z == cols - 1, "invalid ov end data");
//**************************************************************************
//**************************************************************************
// CLEAN MEMORY
free(subCpu);
free(rowCpu);
free(colCpu);
free(resultsCpu);
free(hBusCpu);
CUDA_SAFE_CALL(hipFree(subGpu));
CUDA_SAFE_CALL(hipFree(rowGpu));
CUDA_SAFE_CALL(hipFree(colGpu));
CUDA_SAFE_CALL(hipFree(vBusGpu.mch));
CUDA_SAFE_CALL(hipFree(vBusGpu.scr));
CUDA_SAFE_CALL(hipFree(vBusGpu.aff));
CUDA_SAFE_CALL(hipFree(hBusGpu));
CUDA_SAFE_CALL(hipFree(resultsGpu));
CUDA_SAFE_CALL(hipUnbindTexture(rowTexture));
CUDA_SAFE_CALL(hipUnbindTexture(colTexture));
CUDA_SAFE_CALL(hipUnbindTexture(hBusTexture));
CUDA_SAFE_CALL(hipUnbindTexture(subTexture));
free(params);
//**************************************************************************
TIMER_STOP;
return NULL;
}
//------------------------------------------------------------------------------
//******************************************************************************
#endif // __HIPCC__
| ca05a0f2423f9074751a269fdc9125c3610909e4.cu | /*
swsharp - CUDA parallelized Smith Waterman with applying Hirschberg's and
Ukkonen's algorithm and dynamic cell pruning.
Copyright (C) 2013 Matija Korpar, contributor Mile Šikić
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Contact the author by [email protected].
*/
#ifdef __CUDACC__
#include <stdlib.h>
#include <stdio.h>
#include "chain.h"
#include "cuda_utils.h"
#include "constants.h"
#include "error.h"
#include "scorer.h"
#include "thread.h"
#include "utils.h"
#include "gpu_module.h"
#define MAX_THREADS MAX(THREADS_SM1, THREADS_SM2)
#define THREADS_SM1 64
#define BLOCKS_SM1 240
#define THREADS_SM2 128
#define BLOCKS_SM2 480
#define INT4_ZERO make_int4(0, 0, 0, 0)
#define SCORE4_MIN make_int4(SCORE_MIN, SCORE_MIN, SCORE_MIN, SCORE_MIN)
typedef struct Atom {
int mch;
int2 up;
int4 lScr;
int4 lAff;
int4 rScr;
int4 rAff;
} Atom;
typedef struct VBus {
int* mch;
int4* scr;
int4* aff;
} VBus;
typedef struct Context {
int* queryEnd;
int* targetEnd;
int* outScore;
Chain* query;
Chain* target;
Scorer* scorer;
int score;
int card;
} Context;
static __constant__ int gapOpen_;
static __constant__ int gapExtend_;
static __constant__ int rows_;
static __constant__ int cols_;
static __constant__ int cellWidth_;
static __constant__ int scorerLen_;
static __constant__ int subLen_;
static __constant__ int match_;
static __constant__ int mismatch_;
texture<char4> rowTexture;
texture<char> colTexture;
texture<int2> hBusTexture;
texture<int> subTexture;
//******************************************************************************
// PUBLIC
extern void ovEndDataGpu(int* queryEnd, int* targetEnd, int* outScore,
Chain* query, Chain* target, Scorer* scorer, int score, int card,
Thread* thread);
//******************************************************************************
//******************************************************************************
// PRIVATE
// With visual c++ compiler and prototypes declared cuda global memory variables
// do not work. No questions asked.
#ifndef _WIN32
template<class Sub>
__device__ static void solveShortDelegated(int d, VBus vBus, int2* hBus,
int3* results, Sub sub);
template<class Sub>
__device__ static void solveShortNormal(int d, VBus vBus, int2* hBus,
int3* results, Sub sub);
template<class Sub>
__global__ static void solveShort(int d, VBus vBus, int2* hBus, int3* results,
Sub sub);
template<class Sub>
__global__ static void solveLong(int d, VBus vBus, int2* hBus, int3* results,
Sub sub);
#endif
static void* kernel(void* params);
//******************************************************************************
//******************************************************************************
// PUBLIC
extern void ovEndDataGpu(int* queryEnd, int* targetEnd, int* outScore,
Chain* query, Chain* target, Scorer* scorer, int score, int card,
Thread* thread) {
Context* param = (Context*) malloc(sizeof(Context));
param->queryEnd = queryEnd;
param->targetEnd = targetEnd;
param->outScore = outScore;
param->query = query;
param->target = target;
param->scorer = scorer;
param->score = score;
param->card = card;
if (thread == NULL) {
kernel(param);
} else {
threadCreate(thread, kernel, (void*) param);
}
}
//******************************************************************************
//******************************************************************************
// PRIVATE
//------------------------------------------------------------------------------
// FUNCTORS
class SubScalarRev {
public:
__device__ int operator () (char a, char b) {
return (a == b ? match_ : mismatch_) * (a < scorerLen_ && b < scorerLen_);
}
};
class SubVector {
public:
__device__ int operator () (char a, char b) {
return tex1Dfetch(subTexture, (a * subLen_) + b);
}
};
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// GPU KERNELS
template<class Sub>
__device__ static void solveShortDelegated(int d, VBus vBus, int2* hBus,
int3* results, Sub sub) {
__shared__ int hBusScrShr[MAX_THREADS];
__shared__ int hBusAffShr[MAX_THREADS];
int row = (d + blockIdx.x - gridDim.x + 1) * (blockDim.x * 4) + threadIdx.x * 4;
int col = cellWidth_ * (gridDim.x - blockIdx.x - 1) - threadIdx.x;
if (row < 0) return;
int3 res = { SCORE_MIN, 0, 0 };
if (col == 0) {
int rowPrev = row - gridDim.x * blockDim.x * 4;
if (0 <= rowPrev && rowPrev < rows_) {
int4 prev;
VEC4_ASSIGN(prev, vBus.scr[(row >> 2) % (gridDim.x * blockDim.x)]);
if (prev.x > res.x) { res.x = prev.x; res.y = rowPrev; res.z = cols_ - 1; }
if (prev.y > res.x) { res.x = prev.y; res.y = rowPrev + 1; res.z = cols_ - 1; }
if (prev.z > res.x) { res.x = prev.z; res.y = rowPrev + 2; res.z = cols_ - 1; }
if (prev.w > res.x) { res.x = prev.w; res.y = rowPrev + 3; res.z = cols_ - 1; }
}
}
row -= (col < 0) * (gridDim.x * blockDim.x * 4);
col += (col < 0) * cols_;
Atom atom;
if (0 <= row && row < rows_ && col > 0) {
atom.mch = vBus.mch[(row >> 2) % (gridDim.x * blockDim.x)];
VEC4_ASSIGN(atom.lScr, vBus.scr[(row >> 2) % (gridDim.x * blockDim.x)]);
VEC4_ASSIGN(atom.lAff, vBus.aff[(row >> 2) % (gridDim.x * blockDim.x)]);
} else {
atom.mch = 0;
VEC4_ASSIGN(atom.lScr, INT4_ZERO);
VEC4_ASSIGN(atom.lAff, SCORE4_MIN);
}
hBusScrShr[threadIdx.x] = tex1Dfetch(hBusTexture, col).x;
hBusAffShr[threadIdx.x] = tex1Dfetch(hBusTexture, col).y;
char4 rowCodes = tex1Dfetch(rowTexture, row >> 2);
int del;
for (int i = 0; i < blockDim.x; ++i) {
if (0 <= row && row < rows_) {
char columnCode = tex1Dfetch(colTexture, col);
if (threadIdx.x == 0) {
atom.up = tex1Dfetch(hBusTexture, col);
} else {
atom.up.x = hBusScrShr[threadIdx.x];
atom.up.y = hBusAffShr[threadIdx.x];
}
del = max(atom.up.x - gapOpen_, atom.up.y - gapExtend_);
int ins = max(atom.lScr.x - gapOpen_, atom.lAff.x - gapExtend_);
int mch = atom.mch + sub(columnCode, rowCodes.x);
atom.rScr.x = MAX3(mch, del, ins);
atom.rAff.x = ins;
del = max(atom.rScr.x - gapOpen_, del - gapExtend_);
ins = max(atom.lScr.y - gapOpen_, atom.lAff.y - gapExtend_);
mch = atom.lScr.x + sub(columnCode, rowCodes.y);
atom.rScr.y = MAX3(mch, del, ins);
atom.rAff.y = ins;
del = max(atom.rScr.y - gapOpen_, del - gapExtend_);
ins = max(atom.lScr.z - gapOpen_, atom.lAff.z - gapExtend_);
mch = atom.lScr.y + sub(columnCode, rowCodes.z);
atom.rScr.z = MAX3(mch, del, ins);
atom.rAff.z = ins;
del = max(atom.rScr.z - gapOpen_, del - gapExtend_);
ins = max(atom.lScr.w - gapOpen_, atom.lAff.w - gapExtend_);
mch = atom.lScr.z + sub(columnCode, rowCodes.w);
atom.rScr.w = MAX3(mch, del, ins);
atom.rAff.w = ins;
atom.mch = atom.up.x;
VEC4_ASSIGN(atom.lScr, atom.rScr);
VEC4_ASSIGN(atom.lAff, atom.rAff);
}
__syncthreads();
if (0 <= row && row < rows_) {
if (threadIdx.x == blockDim.x - 1 || i == blockDim.x - 1 || row == rows_ - 4) {
VEC2_ASSIGN(hBus[col], make_int2(atom.rScr.w, del));
} else {
hBusScrShr[threadIdx.x + 1] = atom.rScr.w;
hBusAffShr[threadIdx.x + 1] = del;
}
}
++col;
if (col == cols_) {
if (0 <= row && row < rows_) {
if (atom.rScr.x > res.x) { res.x = atom.rScr.x; res.y = row; res.z = col - 1; }
if (atom.rScr.y > res.x) { res.x = atom.rScr.y; res.y = row + 1; res.z = col - 1; }
if (atom.rScr.z > res.x) { res.x = atom.rScr.z; res.y = row + 2; res.z = col - 1; }
if (atom.rScr.w > res.x) { res.x = atom.rScr.w; res.y = row + 3; res.z = col - 1; }
}
col = 0;
row = row + gridDim.x * blockDim.x * 4;
atom.mch = 0;
VEC4_ASSIGN(atom.lScr, INT4_ZERO);
VEC4_ASSIGN(atom.lAff, SCORE4_MIN);
rowCodes = tex1Dfetch(rowTexture, row >> 2);
}
__syncthreads();
}
if (res.x > results[blockIdx.x * blockDim.x + threadIdx.x].x) {
VEC3_ASSIGN(results[blockIdx.x * blockDim.x + threadIdx.x], res);
}
if (row < 0 || row >= rows_) return;
vBus.mch[(row >> 2) % (gridDim.x * blockDim.x)] = atom.up.x;
VEC4_ASSIGN(vBus.scr[(row >> 2) % (gridDim.x * blockDim.x)], atom.lScr);
VEC4_ASSIGN(vBus.aff[(row >> 2) % (gridDim.x * blockDim.x)], atom.lAff);
}
template<class Sub>
__device__ static void solveShortNormal(int d, VBus vBus, int2* hBus,
int3* results, Sub sub) {
__shared__ int hBusScrShr[MAX_THREADS];
__shared__ int hBusAffShr[MAX_THREADS];
int row = (d + blockIdx.x - gridDim.x + 1) * (blockDim.x * 4) + threadIdx.x * 4;
int col = cellWidth_ * (gridDim.x - blockIdx.x - 1) - threadIdx.x;
if (row < 0 || row >= rows_) return;
Atom atom;
atom.mch = vBus.mch[(row >> 2) % (gridDim.x * blockDim.x)];
VEC4_ASSIGN(atom.lScr, vBus.scr[(row >> 2) % (gridDim.x * blockDim.x)]);
VEC4_ASSIGN(atom.lAff, vBus.aff[(row >> 2) % (gridDim.x * blockDim.x)]);
hBusScrShr[threadIdx.x] = tex1Dfetch(hBusTexture, col).x;
hBusAffShr[threadIdx.x] = tex1Dfetch(hBusTexture, col).y;
const char4 rowCodes = tex1Dfetch(rowTexture, row >> 2);
int del;
for (int i = 0; i < blockDim.x; ++i, ++col) {
char columnCode = tex1Dfetch(colTexture, col);
if (threadIdx.x == 0) {
atom.up = tex1Dfetch(hBusTexture, col);
} else {
atom.up = make_int2(hBusScrShr[threadIdx.x], hBusAffShr[threadIdx.x]);
}
del = max(atom.up.x - gapOpen_, atom.up.y - gapExtend_);
int ins = max(atom.lScr.x - gapOpen_, atom.lAff.x - gapExtend_);
int mch = atom.mch + sub(columnCode, rowCodes.x);
atom.rScr.x = MAX3(mch, del, ins);
atom.rAff.x = ins;
del = max(atom.rScr.x - gapOpen_, del - gapExtend_);
ins = max(atom.lScr.y - gapOpen_, atom.lAff.y - gapExtend_);
mch = atom.lScr.x + sub(columnCode, rowCodes.y);
atom.rScr.y = MAX3(mch, del, ins);
atom.rAff.y = ins;
del = max(atom.rScr.y - gapOpen_, del - gapExtend_);
ins = max(atom.lScr.z - gapOpen_, atom.lAff.z - gapExtend_);
mch = atom.lScr.y + sub(columnCode, rowCodes.z);
atom.rScr.z = MAX3(mch, del, ins);
atom.rAff.z = ins;
del = max(atom.rScr.z - gapOpen_, del - gapExtend_);
ins = max(atom.lScr.w - gapOpen_, atom.lAff.w - gapExtend_);
mch = atom.lScr.z + sub(columnCode, rowCodes.w);
atom.rScr.w = MAX3(mch, del, ins);
atom.rAff.w = ins;
atom.mch = atom.up.x;
VEC4_ASSIGN(atom.lScr, atom.rScr);
VEC4_ASSIGN(atom.lAff, atom.rAff);
__syncthreads();
if (threadIdx.x == blockDim.x - 1 || row == rows_ - 4) {
VEC2_ASSIGN(hBus[col], make_int2(atom.rScr.w, del));
} else {
hBusScrShr[threadIdx.x + 1] = atom.rScr.w;
hBusAffShr[threadIdx.x + 1] = del;
}
__syncthreads();
}
const int vBusIdx = (row >> 2) % (gridDim.x * blockDim.x);
vBus.mch[vBusIdx] = atom.up.x;
VEC4_ASSIGN(vBus.scr[vBusIdx], atom.lScr);
VEC4_ASSIGN(vBus.aff[vBusIdx], atom.lAff);
VEC2_ASSIGN(hBus[col - 1], make_int2(atom.rScr.w, del));
}
template<class Sub>
__global__ static void solveShort(int d, VBus vBus, int2* hBus, int3* results, Sub sub) {
if (blockIdx.x == (gridDim.x - 1)) {
solveShortDelegated(d, vBus, hBus, results, sub);
} else {
solveShortNormal(d, vBus, hBus, results, sub);
}
}
template<class Sub>
__global__ static void solveLong(int d, VBus vBus, int2* hBus, int3* results, Sub sub) {
__shared__ int hBusScrShr[MAX_THREADS];
__shared__ int hBusAffShr[MAX_THREADS];
hBusScrShr[threadIdx.x] = 0;
int row = (d + blockIdx.x - gridDim.x + 1) * (blockDim.x * 4) + threadIdx.x * 4;
int col = cellWidth_ * (gridDim.x - blockIdx.x - 1) - threadIdx.x + blockDim.x;
if (row < 0 || row >= rows_) return;
Atom atom;
atom.mch = vBus.mch[(row >> 2) % (gridDim.x * blockDim.x)];
VEC4_ASSIGN(atom.lScr, vBus.scr[(row >> 2) % (gridDim.x * blockDim.x)]);
VEC4_ASSIGN(atom.lAff, vBus.aff[(row >> 2) % (gridDim.x * blockDim.x)]);
hBusScrShr[threadIdx.x] = tex1Dfetch(hBusTexture, col).x;
hBusAffShr[threadIdx.x] = tex1Dfetch(hBusTexture, col).y;
const char4 rowCodes = tex1Dfetch(rowTexture, row >> 2);
int del;
for (int i = 0; i < cellWidth_ - blockDim.x; ++i, ++col) {
char columnCode = tex1Dfetch(colTexture, col);
if (threadIdx.x == 0) {
atom.up = tex1Dfetch(hBusTexture, col);
} else {
atom.up = make_int2(hBusScrShr[threadIdx.x], hBusAffShr[threadIdx.x]);
}
del = max(atom.up.x - gapOpen_, atom.up.y - gapExtend_);
int ins = max(atom.lScr.x - gapOpen_, atom.lAff.x - gapExtend_);
int mch = atom.mch + sub(columnCode, rowCodes.x);
atom.rScr.x = MAX3(mch, del, ins);
atom.rAff.x = ins;
del = max(atom.rScr.x - gapOpen_, del - gapExtend_);
ins = max(atom.lScr.y - gapOpen_, atom.lAff.y - gapExtend_);
mch = atom.lScr.x + sub(columnCode, rowCodes.y);
atom.rScr.y = MAX3(mch, del, ins);
atom.rAff.y = ins;
del = max(atom.rScr.y - gapOpen_, del - gapExtend_);
ins = max(atom.lScr.z - gapOpen_, atom.lAff.z - gapExtend_);
mch = atom.lScr.y + sub(columnCode, rowCodes.z);
atom.rScr.z = MAX3(mch, del, ins);
atom.rAff.z = ins;
del = max(atom.rScr.z - gapOpen_, del - gapExtend_);
ins = max(atom.lScr.w - gapOpen_, atom.lAff.w - gapExtend_);
mch = atom.lScr.z + sub(columnCode, rowCodes.w);
atom.rScr.w = MAX3(mch, del, ins);
atom.rAff.w = ins;
atom.mch = atom.up.x;
VEC4_ASSIGN(atom.lScr, atom.rScr);
VEC4_ASSIGN(atom.lAff, atom.rAff);
__syncthreads();
if (threadIdx.x == blockDim.x - 1 || row == rows_ - 4) {
VEC2_ASSIGN(hBus[col], make_int2(atom.rScr.w, del));
} else {
hBusScrShr[threadIdx.x + 1] = atom.rScr.w;
hBusAffShr[threadIdx.x + 1] = del;
}
__syncthreads();
}
const int vBusIdx = (row >> 2) % (gridDim.x * blockDim.x);
vBus.mch[vBusIdx] = atom.up.x;
VEC4_ASSIGN(vBus.scr[vBusIdx], atom.lScr);
VEC4_ASSIGN(vBus.aff[vBusIdx], atom.lAff);
VEC2_ASSIGN(hBus[col - 1], make_int2(atom.rScr.w, del));
}
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// CPU KERNELS
static void* kernel(void* params) {
Context* context = (Context*) params;
int* queryEnd = context->queryEnd;
int* targetEnd = context->targetEnd;
int* outScore = context->outScore;
Chain* query = context->query;
Chain* target = context->target;
Scorer* scorer = context->scorer;
// int score = context->score;
int card = context->card;
int currentCard;
CUDA_SAFE_CALL(cudaGetDevice(&currentCard));
if (currentCard != card) {
// CUDA_SAFE_CALL(cudaThreadExit());
CUDA_SAFE_CALL(cudaSetDevice(card));
}
int rows = chainGetLength(query);
int cols = chainGetLength(target);
int gapOpen = scorerGetGapOpen(scorer);
int gapExtend = scorerGetGapExtend(scorer);
int scorerLen = scorerGetMaxCode(scorer);
int subLen = scorerLen + 1;
int scalar = scorerIsScalar(scorer);
TIMER_START("Ov end data %d %d", rows, cols);
cudaDeviceProp properties;
CUDA_SAFE_CALL(cudaGetDeviceProperties(&properties, card));
int threads;
int blocks;
if (properties.major < 2) {
threads = THREADS_SM1;
blocks = BLOCKS_SM1;
} else {
threads = THREADS_SM2;
blocks = BLOCKS_SM2;
}
ASSERT(threads * 2 <= cols, "too short gpu target chain");
if (threads * blocks * 2 > cols) {
blocks = (int) (cols / (threads * 2.));
blocks = blocks <= 30 ? blocks : blocks - (blocks % 30);
// LOG("Blocks trimmed to: %d", blocks);
}
int cellHeight = 4 * threads;
int rowsGpu = rows + (4 - rows % 4) % 4;
int colsGpu = cols + (blocks - cols % blocks) % blocks;
int cellWidth = colsGpu / blocks;
int diagonals = blocks + (int) ceil((float) rowsGpu / cellHeight);
int memoryUsedGpu = 0;
int memoryUsedCpu = 0;
/*
LOG("Rows cpu: %d, gpu: %d", rows, rowsGpu);
LOG("Columns cpu: %d, gpu: %d", cols, colsGpu);
LOG("Cell h: %d, w: %d", cellHeight, cellWidth);
LOG("Diagonals: %d", diagonals);
*/
//**************************************************************************
// PADD CHAINS
char* rowCpu = (char*) malloc(rowsGpu * sizeof(char));
memset(rowCpu, scorerLen, (rowsGpu - rows) * sizeof(char));
chainCopyCodes(query, rowCpu + (rowsGpu - rows));
memoryUsedCpu += rowsGpu * sizeof(char);
char* colCpu = (char*) malloc(colsGpu * sizeof(char));
memset(colCpu, scorerLen, (colsGpu - cols) * sizeof(char));
chainCopyCodes(target, colCpu + (colsGpu - cols));
memoryUsedCpu += colsGpu * sizeof(char);
//**************************************************************************
//**************************************************************************
// INIT GPU
size_t rowSize = rowsGpu * sizeof(char);
char4* rowGpu;
CUDA_SAFE_CALL(cudaMalloc(&rowGpu, rowSize));
CUDA_SAFE_CALL(cudaMemcpy(rowGpu, rowCpu, rowSize, TO_GPU));
CUDA_SAFE_CALL(cudaBindTexture(NULL, rowTexture, rowGpu, rowSize));
memoryUsedGpu += rowSize;
size_t colSize = colsGpu * sizeof(char);
char* colGpu;
CUDA_SAFE_CALL(cudaMalloc(&colGpu, colSize));
CUDA_SAFE_CALL(cudaMemcpy(colGpu, colCpu, colSize, TO_GPU));
CUDA_SAFE_CALL(cudaBindTexture(NULL, colTexture, colGpu, colSize));
memoryUsedGpu += colSize;
size_t hBusSize = colsGpu * sizeof(int2);
int2* hBusCpu = (int2*) malloc(hBusSize);
int2* hBusGpu;
for (int i = 0; i < colsGpu; ++i) {
hBusCpu[i] = make_int2(0, SCORE_MIN);
}
CUDA_SAFE_CALL(cudaMalloc(&hBusGpu, hBusSize));
CUDA_SAFE_CALL(cudaMemcpy(hBusGpu, hBusCpu, hBusSize, TO_GPU));
CUDA_SAFE_CALL(cudaBindTexture(NULL, hBusTexture, hBusGpu, hBusSize));
memoryUsedCpu += hBusSize;
memoryUsedGpu += hBusSize;
VBus vBusGpu;
CUDA_SAFE_CALL(cudaMalloc(&vBusGpu.mch, blocks * threads * sizeof(int)));
CUDA_SAFE_CALL(cudaMalloc(&vBusGpu.scr, blocks * threads * sizeof(int4)));
CUDA_SAFE_CALL(cudaMalloc(&vBusGpu.aff, blocks * threads * sizeof(int4)));
memoryUsedGpu += blocks * threads * sizeof(int);
memoryUsedGpu += blocks * threads * sizeof(int4);
memoryUsedGpu += blocks * threads * sizeof(int4);
size_t resultsSize = blocks * threads * sizeof(int3);
int3* resultsCpu = (int3*) malloc(resultsSize);
int3* resultsGpu;
for (int i = 0; i < blocks * threads; ++i) {
resultsCpu[i] = make_int3(SCORE_MIN, 0, 0);
}
CUDA_SAFE_CALL(cudaMalloc(&resultsGpu, resultsSize));
CUDA_SAFE_CALL(cudaMemcpy(resultsGpu, resultsCpu, resultsSize, TO_GPU));
memoryUsedCpu += resultsSize;
memoryUsedGpu += resultsSize;
size_t subSize = subLen * subLen * sizeof(int);
int* subCpu = (int*) malloc(subSize);
int* subGpu;
for (int i = 0; i < subLen; ++i) {
for (int j = 0; j < subLen; ++j) {
if (i < scorerLen && j < scorerLen) {
subCpu[i * subLen + j] = scorerScore(scorer, i, j);
} else {
subCpu[i * subLen + j] = 0;
}
}
}
CUDA_SAFE_CALL(cudaMalloc(&subGpu, subSize));
CUDA_SAFE_CALL(cudaMemcpy(subGpu, subCpu, subSize, TO_GPU));
CUDA_SAFE_CALL(cudaBindTexture(NULL, subTexture, subGpu, subSize));
memoryUsedCpu += subSize;
memoryUsedGpu += subSize;
CUDA_SAFE_CALL(cudaMemcpyToSymbol(match_, &(subCpu[0]), sizeof(int)));
CUDA_SAFE_CALL(cudaMemcpyToSymbol(mismatch_, &(subCpu[1]), sizeof(int)));
CUDA_SAFE_CALL(cudaMemcpyToSymbol(gapOpen_, &gapOpen, sizeof(int)));
CUDA_SAFE_CALL(cudaMemcpyToSymbol(gapExtend_, &gapExtend, sizeof(int)));
CUDA_SAFE_CALL(cudaMemcpyToSymbol(scorerLen_, &scorerLen, sizeof(int)));
CUDA_SAFE_CALL(cudaMemcpyToSymbol(subLen_, &subLen, sizeof(int)));
CUDA_SAFE_CALL(cudaMemcpyToSymbol(rows_, &rowsGpu, sizeof(int)));
CUDA_SAFE_CALL(cudaMemcpyToSymbol(cols_, &colsGpu, sizeof(int)));
CUDA_SAFE_CALL(cudaMemcpyToSymbol(cellWidth_, &cellWidth, sizeof(int)));
// LOG("Memory used CPU: %fMB", memoryUsedCpu / 1024. / 1024.);
LOG("Memory used GPU: %fMB", memoryUsedGpu / 1024. / 1024.);
//**************************************************************************
//**************************************************************************
// KERNEL RUN
// TIMER_START("Kernel");
for (int diagonal = 0; diagonal < diagonals; ++diagonal) {
if (scalar) {
solveShort<<< blocks, threads >>>(diagonal, vBusGpu, hBusGpu, resultsGpu, SubScalarRev());
solveLong<<< blocks, threads >>>(diagonal, vBusGpu, hBusGpu, resultsGpu, SubScalarRev());
} else {
solveShort<<< blocks, threads >>>(diagonal, vBusGpu, hBusGpu, resultsGpu, SubVector());
solveLong<<< blocks, threads >>>(diagonal, vBusGpu, hBusGpu, resultsGpu, SubVector());
}
}
// TIMER_STOP;
//**************************************************************************
//**************************************************************************
// SAVE RESULTS
CUDA_SAFE_CALL(cudaMemcpy(hBusCpu, hBusGpu, hBusSize, FROM_GPU));
CUDA_SAFE_CALL(cudaMemcpy(resultsCpu, resultsGpu, resultsSize, FROM_GPU));
int3 res = resultsCpu[0];
for (int i = 1; i < blocks * threads; ++i) {
if (resultsCpu[i].x > res.x) {
res = resultsCpu[i];
}
}
for (int i = colsGpu - cols; i < colsGpu; ++i) {
if (hBusCpu[i].x > res.x) {
res.x = hBusCpu[i].x;
res.y = rowsGpu - 1;
res.z = i;
}
}
// restore padding
res.y -= (rowsGpu - rows);
res.z -= (colsGpu - cols);
*outScore = res.x;
*queryEnd = res.y;
*targetEnd = res.z;
LOG("Score: %d, (%d, %d)", *outScore, *queryEnd, *targetEnd);
ASSERT(res.y == rows - 1 || res.z == cols - 1, "invalid ov end data");
//**************************************************************************
//**************************************************************************
// CLEAN MEMORY
free(subCpu);
free(rowCpu);
free(colCpu);
free(resultsCpu);
free(hBusCpu);
CUDA_SAFE_CALL(cudaFree(subGpu));
CUDA_SAFE_CALL(cudaFree(rowGpu));
CUDA_SAFE_CALL(cudaFree(colGpu));
CUDA_SAFE_CALL(cudaFree(vBusGpu.mch));
CUDA_SAFE_CALL(cudaFree(vBusGpu.scr));
CUDA_SAFE_CALL(cudaFree(vBusGpu.aff));
CUDA_SAFE_CALL(cudaFree(hBusGpu));
CUDA_SAFE_CALL(cudaFree(resultsGpu));
CUDA_SAFE_CALL(cudaUnbindTexture(rowTexture));
CUDA_SAFE_CALL(cudaUnbindTexture(colTexture));
CUDA_SAFE_CALL(cudaUnbindTexture(hBusTexture));
CUDA_SAFE_CALL(cudaUnbindTexture(subTexture));
free(params);
//**************************************************************************
TIMER_STOP;
return NULL;
}
//------------------------------------------------------------------------------
//******************************************************************************
#endif // __CUDACC__
|
8d811a401346e2391a47afdeed08e0463f67dd92.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel2_xvel_plus_2_bot;
int xdim0_update_halo_kernel2_xvel_plus_2_bot_h = -1;
__constant__ int ydim0_update_halo_kernel2_xvel_plus_2_bot;
int ydim0_update_halo_kernel2_xvel_plus_2_bot_h = -1;
__constant__ int xdim1_update_halo_kernel2_xvel_plus_2_bot;
int xdim1_update_halo_kernel2_xvel_plus_2_bot_h = -1;
__constant__ int ydim1_update_halo_kernel2_xvel_plus_2_bot;
int ydim1_update_halo_kernel2_xvel_plus_2_bot_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel2_xvel_plus_2_bot * (y) + \
xdim0_update_halo_kernel2_xvel_plus_2_bot * \
ydim0_update_halo_kernel2_xvel_plus_2_bot * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel2_xvel_plus_2_bot * (y) + \
xdim1_update_halo_kernel2_xvel_plus_2_bot * \
ydim1_update_halo_kernel2_xvel_plus_2_bot * (z))
// user function
__device__
inline void
update_halo_kernel2_xvel_plus_2_bot_gpu(double *xvel0, double *xvel1,
const int *fields) {
if (fields[FIELD_XVEL0] == 1)
xvel0[OPS_ACC0(0, 0, 0)] = xvel0[OPS_ACC0(0, 2, 0)];
if (fields[FIELD_XVEL1] == 1)
xvel1[OPS_ACC1(0, 0, 0)] = xvel1[OPS_ACC1(0, 2, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel2_xvel_plus_2_bot(
double *__restrict arg0, double *__restrict arg1,
const int *__restrict arg2, int size0, int size1, int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim0_update_halo_kernel2_xvel_plus_2_bot +
idx_z * 1 * 1 * xdim0_update_halo_kernel2_xvel_plus_2_bot *
ydim0_update_halo_kernel2_xvel_plus_2_bot;
arg1 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim1_update_halo_kernel2_xvel_plus_2_bot +
idx_z * 1 * 1 * xdim1_update_halo_kernel2_xvel_plus_2_bot *
ydim1_update_halo_kernel2_xvel_plus_2_bot;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel2_xvel_plus_2_bot_gpu(arg0, arg1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel2_xvel_plus_2_bot(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1,
ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel2_xvel_plus_2_bot_execute(
ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args, 3, range, 25))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(25, "update_halo_kernel2_xvel_plus_2_bot");
OPS_kernels[25].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel2_xvel_plus_2_bot_h ||
ydim0 != ydim0_update_halo_kernel2_xvel_plus_2_bot_h ||
xdim1 != xdim1_update_halo_kernel2_xvel_plus_2_bot_h ||
ydim1 != ydim1_update_halo_kernel2_xvel_plus_2_bot_h) {
hipMemcpyToSymbol(xdim0_update_halo_kernel2_xvel_plus_2_bot, &xdim0,
sizeof(int));
xdim0_update_halo_kernel2_xvel_plus_2_bot_h = xdim0;
hipMemcpyToSymbol(ydim0_update_halo_kernel2_xvel_plus_2_bot, &ydim0,
sizeof(int));
ydim0_update_halo_kernel2_xvel_plus_2_bot_h = ydim0;
hipMemcpyToSymbol(xdim1_update_halo_kernel2_xvel_plus_2_bot, &xdim1,
sizeof(int));
xdim1_update_halo_kernel2_xvel_plus_2_bot_h = xdim1;
hipMemcpyToSymbol(ydim1_update_halo_kernel2_xvel_plus_2_bot, &ydim1,
sizeof(int));
ydim1_update_halo_kernel2_xvel_plus_2_bot_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
// set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[25].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_update_halo_kernel2_xvel_plus_2_bot), dim3(grid), dim3(tblock), 0, 0,
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags > 1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[25].time += t1 - t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
#endif
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[25].mpi_time += t2 - t1;
OPS_kernels[25].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[25].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel2_xvel_plus_2_bot(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1,
ops_arg arg2) {
ops_kernel_descriptor *desc =
(ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 25;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 25;
for (int i = 0; i < 6; i++) {
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg *)malloc(3 * sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char *)malloc(NUM_FIELDS * sizeof(int));
memcpy(tmp, arg2.data, NUM_FIELDS * sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel2_xvel_plus_2_bot_execute;
if (OPS_diags > 1) {
ops_timing_realloc(25, "update_halo_kernel2_xvel_plus_2_bot");
}
ops_enqueue_kernel(desc);
}
#endif
| 8d811a401346e2391a47afdeed08e0463f67dd92.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel2_xvel_plus_2_bot;
int xdim0_update_halo_kernel2_xvel_plus_2_bot_h = -1;
__constant__ int ydim0_update_halo_kernel2_xvel_plus_2_bot;
int ydim0_update_halo_kernel2_xvel_plus_2_bot_h = -1;
__constant__ int xdim1_update_halo_kernel2_xvel_plus_2_bot;
int xdim1_update_halo_kernel2_xvel_plus_2_bot_h = -1;
__constant__ int ydim1_update_halo_kernel2_xvel_plus_2_bot;
int ydim1_update_halo_kernel2_xvel_plus_2_bot_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel2_xvel_plus_2_bot * (y) + \
xdim0_update_halo_kernel2_xvel_plus_2_bot * \
ydim0_update_halo_kernel2_xvel_plus_2_bot * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel2_xvel_plus_2_bot * (y) + \
xdim1_update_halo_kernel2_xvel_plus_2_bot * \
ydim1_update_halo_kernel2_xvel_plus_2_bot * (z))
// user function
__device__
inline void
update_halo_kernel2_xvel_plus_2_bot_gpu(double *xvel0, double *xvel1,
const int *fields) {
if (fields[FIELD_XVEL0] == 1)
xvel0[OPS_ACC0(0, 0, 0)] = xvel0[OPS_ACC0(0, 2, 0)];
if (fields[FIELD_XVEL1] == 1)
xvel1[OPS_ACC1(0, 0, 0)] = xvel1[OPS_ACC1(0, 2, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel2_xvel_plus_2_bot(
double *__restrict arg0, double *__restrict arg1,
const int *__restrict arg2, int size0, int size1, int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim0_update_halo_kernel2_xvel_plus_2_bot +
idx_z * 1 * 1 * xdim0_update_halo_kernel2_xvel_plus_2_bot *
ydim0_update_halo_kernel2_xvel_plus_2_bot;
arg1 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim1_update_halo_kernel2_xvel_plus_2_bot +
idx_z * 1 * 1 * xdim1_update_halo_kernel2_xvel_plus_2_bot *
ydim1_update_halo_kernel2_xvel_plus_2_bot;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel2_xvel_plus_2_bot_gpu(arg0, arg1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel2_xvel_plus_2_bot(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1,
ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel2_xvel_plus_2_bot_execute(
ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args, 3, range, 25))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(25, "update_halo_kernel2_xvel_plus_2_bot");
OPS_kernels[25].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel2_xvel_plus_2_bot_h ||
ydim0 != ydim0_update_halo_kernel2_xvel_plus_2_bot_h ||
xdim1 != xdim1_update_halo_kernel2_xvel_plus_2_bot_h ||
ydim1 != ydim1_update_halo_kernel2_xvel_plus_2_bot_h) {
cudaMemcpyToSymbol(xdim0_update_halo_kernel2_xvel_plus_2_bot, &xdim0,
sizeof(int));
xdim0_update_halo_kernel2_xvel_plus_2_bot_h = xdim0;
cudaMemcpyToSymbol(ydim0_update_halo_kernel2_xvel_plus_2_bot, &ydim0,
sizeof(int));
ydim0_update_halo_kernel2_xvel_plus_2_bot_h = ydim0;
cudaMemcpyToSymbol(xdim1_update_halo_kernel2_xvel_plus_2_bot, &xdim1,
sizeof(int));
xdim1_update_halo_kernel2_xvel_plus_2_bot_h = xdim1;
cudaMemcpyToSymbol(ydim1_update_halo_kernel2_xvel_plus_2_bot, &ydim1,
sizeof(int));
ydim1_update_halo_kernel2_xvel_plus_2_bot_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
// set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[25].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
ops_update_halo_kernel2_xvel_plus_2_bot<<<grid, tblock>>>(
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags > 1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[25].time += t1 - t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
#endif
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[25].mpi_time += t2 - t1;
OPS_kernels[25].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[25].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel2_xvel_plus_2_bot(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1,
ops_arg arg2) {
ops_kernel_descriptor *desc =
(ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 25;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 25;
for (int i = 0; i < 6; i++) {
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg *)malloc(3 * sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char *)malloc(NUM_FIELDS * sizeof(int));
memcpy(tmp, arg2.data, NUM_FIELDS * sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel2_xvel_plus_2_bot_execute;
if (OPS_diags > 1) {
ops_timing_realloc(25, "update_halo_kernel2_xvel_plus_2_bot");
}
ops_enqueue_kernel(desc);
}
#endif
|
80509ed0f04364b3a0a814ba401b973f4ad7b880.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Heap.cuh"
__global__ void heap_test()
{
Heap heap(10);
heap.print();
Lit l = mkLit(7, false);
heap.increment_lit(l, 36);
heap.print();
l = mkLit(3, true);
heap.increment_lit(l, 190);
heap.print();
l = mkLit(6, false);
heap.increment_lit(l, 45);
heap.print();
printf("Done.\n");
}
int main_test_head()
{
hipLaunchKernelGGL(( heap_test) , dim3(1), dim3(1), 0, 0, );
hipDeviceReset();
}
| 80509ed0f04364b3a0a814ba401b973f4ad7b880.cu | #include "Heap.cuh"
__global__ void heap_test()
{
Heap heap(10);
heap.print();
Lit l = mkLit(7, false);
heap.increment_lit(l, 36);
heap.print();
l = mkLit(3, true);
heap.increment_lit(l, 190);
heap.print();
l = mkLit(6, false);
heap.increment_lit(l, 45);
heap.print();
printf("Done.\n");
}
int main_test_head()
{
heap_test <<< 1, 1>>>();
cudaDeviceReset();
}
|
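Appended illustrative sketch (not one of the dataset rows above; `demo_kernel` and `demo_main` are hypothetical names): it shows, in a minimal self-contained form, the kernel-launch mapping that each hip/cuda pair in this dump exhibits, e.g. `heap_test <<< 1, 1>>>()` in the `.cu` cell versus `hipLaunchKernelGGL(( heap_test) , dim3(1), dim3(1), 0, 0, )` in the generated `.hip` cell.
#include <hip/hip_runtime.h>
#include <cstdio>
// Trivial kernel used only for the launch-syntax illustration.
__global__ void demo_kernel(int tag) {
    // Device-side printf is supported by both CUDA and HIP.
    printf("tag %d from block %d\n", tag, blockIdx.x);
}
int demo_main() {
    // HIP macro form; the equivalent CUDA syntax would be:
    //   demo_kernel<<<dim3(2), dim3(1), 0, 0>>>(7);
    hipLaunchKernelGGL(demo_kernel, dim3(2), dim3(1), 0, 0, 7);
    (void)hipDeviceSynchronize();
    return 0;
}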